diff --git a/go.mod b/go.mod index cfcb6c0f57..2e644e4e79 100644 --- a/go.mod +++ b/go.mod @@ -14,51 +14,54 @@ replace ( require ( github.com/blang/semver/v4 v4.0.0 github.com/cilium/charts v0.0.0-20231113210111-5a4126644d7d - github.com/cilium/cilium v1.15.0-pre.2 + github.com/cilium/cilium v1.15.0-pre.3 github.com/cilium/hubble v0.12.2 github.com/cilium/tetragon/pkg/k8s v0.0.0-20231127174521-c97da4b42413 github.com/cilium/workerpool v1.2.0 github.com/cloudflare/cfssl v1.6.4 github.com/distribution/reference v0.5.0 - github.com/go-openapi/strfmt v0.21.7 + github.com/go-openapi/strfmt v0.21.8 github.com/google/gops v0.3.28 github.com/mholt/archiver/v3 v3.5.1 github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 github.com/spf13/cobra v1.8.0 github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace - golang.org/x/exp v0.0.0-20231006140011-7918f672742d + golang.org/x/exp v0.0.0-20231127185646-65229373498e golang.org/x/mod v0.14.0 google.golang.org/grpc v1.59.0 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c helm.sh/helm/v3 v3.13.2 - k8s.io/api v0.28.4 - k8s.io/apimachinery v0.28.4 - k8s.io/cli-runtime v0.28.2 - k8s.io/client-go v0.28.4 - k8s.io/klog/v2 v2.100.1 - sigs.k8s.io/yaml v1.3.0 + k8s.io/api v0.29.0-rc.1 + k8s.io/apimachinery v0.29.0-rc.1 + k8s.io/cli-runtime v0.29.0-rc.1 + k8s.io/client-go v0.29.0-rc.1 + k8s.io/klog/v2 v2.110.1 + sigs.k8s.io/yaml v1.4.0 ) require ( github.com/Microsoft/hcsshim v0.11.0 // indirect - github.com/cilium/ebpf v0.12.2 // indirect - github.com/cilium/proxy v0.0.0-20231018073547-ab187719b71b // indirect + github.com/cilium/ebpf v0.12.3 // indirect + github.com/cilium/proxy v0.0.0-20231031145409-f19708f3d018 // indirect github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/gopacket v1.1.19 // indirect + github.com/gorilla/websocket v1.5.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/sagikazarmark/locafero v0.3.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect github.com/sourcegraph/conc v0.3.0 // indirect - go.opentelemetry.io/otel/metric v1.19.0 // indirect + go.opentelemetry.io/otel/metric v1.21.0 // indirect go.opentelemetry.io/proto/otlp v1.0.0 // indirect go.uber.org/dig v1.17.1 // indirect go.uber.org/multierr v1.11.0 // indirect - go4.org/netipx v0.0.0-20230824141953-6213f710f925 // indirect - google.golang.org/genproto v0.0.0-20231012201019-e917dd12ba7a // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b // indirect + go4.org/netipx v0.0.0-20231129151722-fdeea329fbba // indirect + google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect ) @@ -89,7 +92,7 @@ require ( github.com/docker/go-metrics v0.0.1 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect - github.com/emicklei/go-restful/v3 v3.10.2 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect 
github.com/envoyproxy/protoc-gen-validate v1.0.2 // indirect github.com/evanphx/json-patch v5.7.0+incompatible github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect @@ -97,18 +100,18 @@ require ( github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/go-errors/errors v1.4.2 // indirect github.com/go-gorp/gorp/v3 v3.1.0 // indirect - github.com/go-logr/logr v1.2.4 // indirect + github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-openapi/analysis v0.21.4 // indirect github.com/go-openapi/errors v0.20.4 // indirect - github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonpointer v0.20.0 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/loads v0.21.2 // indirect github.com/go-openapi/runtime v0.26.0 // indirect - github.com/go-openapi/spec v0.20.9 // indirect + github.com/go-openapi/spec v0.20.11 // indirect github.com/go-openapi/swag v0.22.4 // indirect - github.com/go-openapi/validate v0.22.1 // indirect + github.com/go-openapi/validate v0.22.3 // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect @@ -120,12 +123,12 @@ require ( github.com/google/gofuzz v1.2.0 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/uuid v1.4.0 // indirect - github.com/gorilla/mux v1.8.0 // indirect + github.com/gorilla/mux v1.8.1 // indirect github.com/gosuri/uitable v0.0.4 // indirect github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/huandu/xstrings v1.4.0 // indirect - github.com/imdario/mergo v0.3.13 // indirect + github.com/imdario/mergo v0.3.16 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jmoiron/sqlx v1.3.5 // indirect github.com/josharian/intern v1.0.0 // indirect @@ -181,14 +184,14 @@ require ( github.com/shopspring/decimal v1.3.1 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/spf13/afero v1.10.0 // indirect - github.com/spf13/cast v1.5.1 // indirect + github.com/spf13/cast v1.6.0 // indirect github.com/spf13/viper v1.17.0 // indirect github.com/stretchr/testify v1.8.4 github.com/subosito/gotenv v1.6.0 // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect github.com/ulikunitz/xz v0.5.10 // indirect - github.com/vishvananda/netlink v1.2.1-beta.2.0.20231024175852-77df5d35f725 // indirect + github.com/vishvananda/netlink v1.2.1-beta.2.0.20231127184239-0ced8385386a // indirect github.com/vishvananda/netns v0.0.4 // indirect github.com/weppos/publicsuffix-go v0.15.1-0.20210511084619-b1f36a2d6c0b // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect @@ -199,32 +202,32 @@ require ( github.com/yusufpapurcu/wmi v1.2.3 // indirect github.com/zmap/zcrypto v0.0.0-20210511125630-18f1e0152cfc // indirect github.com/zmap/zlint/v3 v3.1.0 // indirect - go.mongodb.org/mongo-driver v1.11.9 // indirect - go.opentelemetry.io/otel v1.19.0 // indirect - go.opentelemetry.io/otel/trace v1.19.0 // indirect + go.mongodb.org/mongo-driver v1.12.0 // indirect + go.opentelemetry.io/otel v1.21.0 // indirect + go.opentelemetry.io/otel/trace v1.21.0 // indirect go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect - golang.org/x/crypto v0.14.0 // 
indirect - golang.org/x/net v0.17.0 // indirect - golang.org/x/oauth2 v0.12.0 // indirect + golang.org/x/crypto v0.16.0 // indirect + golang.org/x/net v0.19.0 // indirect + golang.org/x/oauth2 v0.13.0 // indirect golang.org/x/sync v0.5.0 // indirect - golang.org/x/sys v0.13.0 // indirect - golang.org/x/term v0.13.0 // indirect - golang.org/x/text v0.13.0 // indirect - golang.org/x/time v0.3.0 // indirect - google.golang.org/appengine v1.6.7 // indirect + golang.org/x/sys v0.15.0 // indirect + golang.org/x/term v0.15.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/time v0.5.0 // indirect + google.golang.org/appengine v1.6.8 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 - k8s.io/apiextensions-apiserver v0.28.4 - k8s.io/apiserver v0.28.4 // indirect - k8s.io/component-base v0.28.4 // indirect - k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect + k8s.io/apiextensions-apiserver v0.29.0-rc.1 + k8s.io/apiserver v0.29.0-rc.1 // indirect + k8s.io/component-base v0.29.0-rc.1 // indirect + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect k8s.io/kubectl v0.28.2 // indirect k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect oras.land/oras-go v1.2.4 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect sigs.k8s.io/kustomize/kyaml v0.14.4-0.20230814161922-911ddcda40a8 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) diff --git a/go.sum b/go.sum index 718e1528cd..8c62e9d71c 100644 --- a/go.sum +++ b/go.sum @@ -61,8 +61,6 @@ github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migc github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/Microsoft/hcsshim v0.11.0 h1:7EFNIY4igHEXUdj1zXgAyU3fLc7QfOKHbkldRVTBdiM= github.com/Microsoft/hcsshim v0.11.0/go.mod h1:OEthFdQv/AD2RAdzR6Mm1N1KPCztGKDurW1Z8b8VGMM= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs= github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -102,14 +100,14 @@ github.com/cilium/charts v0.0.0-20231113210111-5a4126644d7d h1:s2G6szOXRD1lXMI3B github.com/cilium/charts v0.0.0-20231113210111-5a4126644d7d/go.mod h1:M3C9VOlFvRzuV+a01t07Tw4uFLSfkCH3L542IWjf6BU= github.com/cilium/checkmate v1.0.3 h1:CQC5eOmlAZeEjPrVZY3ZwEBH64lHlx9mXYdUehEwI5w= github.com/cilium/checkmate v1.0.3/go.mod h1:KiBTasf39/F2hf2yAmHw21YFl3hcEyP4Yk6filxc12A= -github.com/cilium/cilium v1.15.0-pre.2 h1:VzGFTNRutaXESrGCFdLs8c5DkjyzTJuAA26bCK3A9cU= -github.com/cilium/cilium v1.15.0-pre.2/go.mod h1:RDqPn88imAQLUlE19/C5x2wpBN+KmTqNfAY/lmANtCM= -github.com/cilium/ebpf v0.12.2 h1:cP3qL4kkl19kr/F+hKqUo9F9pPMVz1oms8C7Qj0AwWk= -github.com/cilium/ebpf v0.12.2/go.mod h1:u9H29/Iq+8cy70YqI6p5pfADkFl3vdnV2qXDg5JL0Zo= +github.com/cilium/cilium v1.15.0-pre.3 h1:HuARlBUHQxXaLp1JoCYIv/2iwKZPxcIuQ3TWtjPWKpk= 
+github.com/cilium/cilium v1.15.0-pre.3/go.mod h1:qlZFr8L7fBVj5dY2fUj+lr47TfM/mrBSh//9g6QS/dQ= +github.com/cilium/ebpf v0.12.3 h1:8ht6F9MquybnY97at+VDZb3eQQr8ev79RueWeVaEcG4= +github.com/cilium/ebpf v0.12.3/go.mod h1:TctK1ivibvI3znr66ljgi4hqOT8EYQjz1KWBfb1UVgM= github.com/cilium/hubble v0.12.2 h1:iSKm8Ezuonjke2APgMQ7vIOmUGj0L9Be7b/sRa7FuAs= github.com/cilium/hubble v0.12.2/go.mod h1:eKlcKF5Lynj+GXCCvqDCgu4UW9dUE1cAnYfQbFd0Bb0= -github.com/cilium/proxy v0.0.0-20231018073547-ab187719b71b h1:NYdQn/qPPkqHNSRMKWnSwhQuy2/rJn8pAl656iMdKtw= -github.com/cilium/proxy v0.0.0-20231018073547-ab187719b71b/go.mod h1:p044XccCmONGIUbx3bJ7qvHXK0RcrdvIvbTGiu/RjUA= +github.com/cilium/proxy v0.0.0-20231031145409-f19708f3d018 h1:R/QlThqx099hS6req1k2Q87fvLSRgCEicQGate9vxO4= +github.com/cilium/proxy v0.0.0-20231031145409-f19708f3d018/go.mod h1:p044XccCmONGIUbx3bJ7qvHXK0RcrdvIvbTGiu/RjUA= github.com/cilium/tetragon/pkg/k8s v0.0.0-20231127174521-c97da4b42413 h1:ANbBZd5MPBK8MJGiSMVb7Dr0tRgQhA0LlXwEQqeGxds= github.com/cilium/tetragon/pkg/k8s v0.0.0-20231127174521-c97da4b42413/go.mod h1:phk10izbnP8+eLw/fnMZTaPyxNcZIlyarWBR+ncQ2cQ= github.com/cilium/workerpool v1.2.0 h1:Wc2iOPTvCgWKQXeq4L5tnx4QFEI+z5q1+bSpSS0cnAY= @@ -163,8 +161,8 @@ github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNE github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 h1:iFaUwBSo5Svw6L7HYpRu/0lE3e0BaElwnNO1qkNQxBY= github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5/go.mod h1:qssHWj60/X5sZFNxpG4HBPDHVqxNm4DfnCKgrbZOT+s= github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY= -github.com/emicklei/go-restful/v3 v3.10.2 h1:hIovbnmBTLjHXkqEBUz3HGpXZdM7ZrE9fJIZIqlJLqE= -github.com/emicklei/go-restful/v3 v3.10.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -183,8 +181,8 @@ github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBd github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/foxcpp/go-mockdns v1.0.0 h1:7jBqxd3WDWwi/6WhDvacvH1XsN3rOLXyHM1uhvIx6FI= github.com/foxcpp/go-mockdns v1.0.0/go.mod h1:lgRN6+KxQBawyIghpnl5CezHFGS9VLzvtVlwxvzXTQ4= -github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx3GhA= -github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= @@ -197,87 +195,55 @@ github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpj github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= 
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= -github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc= github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo= -github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.20.4 h1:unTcVm6PispJsMECE3zWgvG4xTiKda1LIR5rCRWLG6M= github.com/go-openapi/errors v0.20.4/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= -github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= +github.com/go-openapi/jsonpointer v0.20.0 h1:ESKJdU9ASRfaPNOPRx12IUyA1vn3R9GiE3KYD14BXdQ= +github.com/go-openapi/jsonpointer v0.20.0/go.mod h1:6PGzBjjIIumbLYysB73Klnms1mwnU4G3YHOECG3CedA= github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= -github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g= github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro= github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw= github.com/go-openapi/runtime v0.26.0 h1:HYOFtG00FM1UvqrcxbEJg/SwvDRvYLQKGhw2zaQjTcc= github.com/go-openapi/runtime v0.26.0/go.mod h1:QgRGeZwrUcSHdeh4Ka9Glvo0ug1LC5WyE+EV88plZrQ= -github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= -github.com/go-openapi/spec v0.20.9 h1:xnlYNQAwKd2VQRRfwTEI0DcK+2cbuvI/0c7jx3gA8/8= -github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= -github.com/go-openapi/strfmt 
v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= -github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= +github.com/go-openapi/spec v0.20.11 h1:J/TzFDLTt4Rcl/l1PmyErvkqlJDncGvPTMnCI39I4gY= +github.com/go-openapi/spec v0.20.11/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= -github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k= -github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew= +github.com/go-openapi/strfmt v0.21.8 h1:VYBUoKYRLAlgKDrIxR/I0lKrztDQ0tuTDrbhLVP8Erg= +github.com/go-openapi/strfmt v0.21.8/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU= -github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= +github.com/go-openapi/validate v0.22.3 h1:KxG9mu5HBRYbecRb37KRCihvGGtND2aXziBAv0NNfyI= +github.com/go-openapi/validate v0.22.3/go.mod h1:kVxh31KbfsxU8ZyoHaDbLBWU5CnMdqBUEtadQ2G4d5M= github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= -github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= -github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= -github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= -github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= -github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= -github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= -github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= -github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= -github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= -github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= -github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= -github.com/gobuffalo/gogen 
v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= -github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= -github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= -github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= github.com/gobuffalo/logger v1.0.6 h1:nnZNpxYo0zx+Aj9RfMPBm+x9zAU2OayFh/xrAWi34HU= github.com/gobuffalo/logger v1.0.6/go.mod h1:J31TBEHR1QLV2683OXTAItYIg8pv2JMHnF/quuAbMjs= -github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= -github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= github.com/gobuffalo/packd v1.0.1 h1:U2wXfRr4E9DH8IdsDLlRFwTZTK7hLfq9qT/QHXGVe/0= github.com/gobuffalo/packd v1.0.1/go.mod h1:PP2POP3p3RXGz7Jh6eYEf93S7vA2za6xM7QT85L4+VY= -github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= -github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= github.com/gobuffalo/packr/v2 v2.8.3 h1:xE1yzvnO56cUC0sTpKR3DIbxZgB54AftTFMhB2XEWlY= github.com/gobuffalo/packr/v2 v2.8.3/go.mod h1:0SahksCVcx4IMnigTjiFuyldmTrdTctXsOdiU5KwbKc= -github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -311,6 +277,7 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -344,6 +311,8 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= +github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/gops v0.3.28 h1:2Xr57tqKAmQYRAfG12E+yLcoa2Y42UJo2lOrUFL9ark= github.com/google/gops v0.3.28/go.mod h1:6f6+Nl8LcHrzJwi8+p0ii+vmBFSlB4f8cOOkTJ7sk4c= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= @@ -373,9 +342,11 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod 
h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY= github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= @@ -399,14 +370,12 @@ github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= -github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g= github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ= -github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= @@ -416,8 +385,6 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= -github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= github.com/karrick/godirwalk v1.16.1 h1:DynhcF+bztK8gooS0+NDJFrdNZjJ3gzVzC545UNA9iw= github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= github.com/kisielk/errcheck v1.5.0/go.mod 
h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= @@ -431,7 +398,6 @@ github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgo github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -463,7 +429,6 @@ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/markbates/errx v1.1.0 h1:QDFeR+UP95dO12JgW+tgi2UVfo0V8YBHiUIOaeBPiEI= github.com/markbates/errx v1.1.0/go.mod h1:PLa46Oex9KNbVDZhKel8v1OT7hD5JZ2eI7AHhA0wswc= -github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= github.com/markbates/oncer v1.0.0 h1:E83IaVAHygyndzPimgUYJjbshhDTALZyXxvk9FOlQRY= github.com/markbates/oncer v1.0.0/go.mod h1:Z59JA581E9GP6w96jai+TGqafHPW+cPfRxz2aSZ0mcI= github.com/markbates/safe v1.0.1 h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI= @@ -521,16 +486,18 @@ github.com/mreiferson/go-httpclient v0.0.0-20160630210159-31f0106b4474/go.mod h1 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nwaples/rardecode v1.1.0 h1:vSxaY8vQhOcVr4mm5e8XllHWTiM4JF507A0Katqw7MQ= github.com/nwaples/rardecode v1.1.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE= -github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= -github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= -github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= +github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod 
h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= @@ -538,7 +505,6 @@ github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/ github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b h1:FfH+VrHHk6Lxt9HdVS0PXzSXFyS2NbZKXv33FYPol0A= github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b/go.mod h1:AC62GU6hc0BrNm+9RK9VSiwa/EUe1bkIeFORAMcHvJU= -github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= @@ -554,7 +520,6 @@ github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9 github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= @@ -587,8 +552,6 @@ github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= @@ -614,9 +577,6 @@ github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5g github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= @@ -625,12 +585,10 @@ 
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIK github.com/spf13/afero v1.10.0 h1:EaGW2JJh15aKOejeuJ+wpFSHnbd7GE6Wvp3TsNhb6LY= github.com/spf13/afero v1.10.0/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= -github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace h1:9PNP1jnUjRhfmGMlkXHjYPishpcw4jpSt/V/xYY3FMA= github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= @@ -655,8 +613,6 @@ github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXl github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= -github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= @@ -665,8 +621,8 @@ github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oW github.com/ulikunitz/xz v0.5.9/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8= github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= -github.com/vishvananda/netlink v1.2.1-beta.2.0.20231024175852-77df5d35f725 h1:jXQwcl0Ga5tfyej8AdhADlg1R4OcbpSVRWrpn4ihl8M= -github.com/vishvananda/netlink v1.2.1-beta.2.0.20231024175852-77df5d35f725/go.mod h1:whJevzBpTrid75eZy99s3DqCmy05NfibNaF2Ol5Ox5A= +github.com/vishvananda/netlink v1.2.1-beta.2.0.20231127184239-0ced8385386a h1:PdKmLjqKUM8AfjGqDbrF/C56RvuGFDMYB0Z+8TMmGpU= +github.com/vishvananda/netlink v1.2.1-beta.2.0.20231127184239-0ced8385386a/go.mod h1:whJevzBpTrid75eZy99s3DqCmy05NfibNaF2Ol5Ox5A= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= @@ -674,10 +630,10 @@ github.com/weppos/publicsuffix-go v0.13.1-0.20210123135404-5fd73613514e/go.mod h github.com/weppos/publicsuffix-go v0.15.1-0.20210511084619-b1f36a2d6c0b h1:FsyNrX12e5BkplJq7wKOLk0+C6LZ+KGXvuEcKUYm5ss= github.com/weppos/publicsuffix-go v0.15.1-0.20210511084619-b1f36a2d6c0b/go.mod 
h1:HYux0V0Zi04bHNwOHy4cXJVz/TQjYonnF6aoYhj+3QE= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= -github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= -github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= +github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= +github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -710,11 +666,9 @@ github.com/zmap/zcrypto v0.0.0-20210511125630-18f1e0152cfc h1:zkGwegkOW709y0oiAr github.com/zmap/zcrypto v0.0.0-20210511125630-18f1e0152cfc/go.mod h1:FM4U1E3NzlNMRnSUTU3P1UdukWhYGifqEsjk9fn7BCk= github.com/zmap/zlint/v3 v3.1.0 h1:WjVytZo79m/L1+/Mlphl09WBob6YTGljN5IGWZFpAv0= github.com/zmap/zlint/v3 v3.1.0/go.mod h1:L7t8s3sEKkb0A2BxGy1IWrxt1ZATa1R4QfJZaQOD3zU= -go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= -go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= -go.mongodb.org/mongo-driver v1.11.9 h1:JY1e2WLxwNuwdBAPgQxjf4BWweUGP86lF55n89cGZVA= -go.mongodb.org/mongo-driver v1.11.9/go.mod h1:P8+TlbZtPFgjUrmnIF41z97iDnSMswJJu6cztZSlCTg= +go.mongodb.org/mongo-driver v1.12.0 h1:aPx33jmn/rQuJXPQLZQ8NtfPQG8CaqgLThFtqRb0PiE= +go.mongodb.org/mongo-driver v1.12.0/go.mod h1:AZkxhPnFJUoH7kZlFkVKucV20K387miPfm7oimrSmK0= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -723,14 +677,14 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= -go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= -go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= -go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= -go.opentelemetry.io/otel/sdk v1.14.0 h1:PDCppFRDq8A1jL9v6KMI6dYesaq+DFcDZvjsoGvxGzY= -go.opentelemetry.io/otel/sdk v1.14.0/go.mod h1:bwIC5TjrNG6QDCHNWvW4HLHtUQ4I+VQDsnjhvyZCALM= -go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= -go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= +go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= +go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= +go.opentelemetry.io/otel/metric v1.21.0 
h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= +go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= +go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o= +go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= +go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= +go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY= @@ -741,15 +695,13 @@ go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go4.org/netipx v0.0.0-20230824141953-6213f710f925 h1:eeQDDVKFkx0g4Hyy8pHgmZaK0EqB4SD6rvKbUdN3ziQ= -go4.org/netipx v0.0.0-20230824141953-6213f710f925/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y= +go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M= +go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201124201722-c8d3bf9c5392/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= @@ -757,8 +709,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= -golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY= +golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -769,8 +721,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= -golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= +golang.org/x/exp v0.0.0-20231127185646-65229373498e h1:Gvh4YaCaXNs6dKTlfgismwWZKyjVZXwOPfIyUaqU3No= +golang.org/x/exp v0.0.0-20231127185646-65229373498e/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -831,12 +783,11 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -846,13 +797,12 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4= -golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4= +golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY= +golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= 
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -868,13 +818,9 @@ golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -907,7 +853,6 @@ golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -923,45 +868,42 @@ golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
-golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= -golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= 
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= @@ -1003,8 +945,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= -golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= +golang.org/x/tools v0.16.0 h1:GO788SKMRunPIBCXiQyo2AaexLstOrVhuAL5YwsckQM= +golang.org/x/tools v0.16.0/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1034,8 +976,9 @@ google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -1072,12 +1015,12 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D google.golang.org/genproto 
v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20231012201019-e917dd12ba7a h1:fwgW9j3vHirt4ObdHoYNwuO24BEZjSzbh+zPaNWoiY8= -google.golang.org/genproto v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:EMfReVxb80Dq1hhioy0sOsY9jCE46YDgHlJ7fWVUWRE= -google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 h1:W18sezcAYs+3tDZX4F80yctqa12jcP1PUS2gQu1zTPU= -google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97/go.mod h1:iargEX0SFPm3xcfMI0d1domjg0ZF4Aa0p2awqyxhvF0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b h1:ZlWIi1wSK56/8hn4QcBp/j9M7Gt3U/3hZw3mC7vDICo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:swOH3j0KzcDDgGUWr+SNpyTen5YrXjS3eyPzFYKc6lc= +google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f h1:Vn+VyHU5guc9KjB5KrjI2q0wCOWEOIh0OEsleqakHJg= +google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f/go.mod h1:nWSwAFPb+qfNJXsoeO3Io7zf4tMSfN8EA8RlDA04GhY= +google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 h1:JpwMPBpFN3uKhdaekDpiNlImDdkUAyiJ6ez/uxGaUSo= +google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:0xJLfVdJqpAPl8tDg1ujOCGzx6LFLttXT5NhllGOXY4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 h1:DC7wcm+i+P1rN3Ff07vL+OndGg5OhNddHyTA+ocPqYE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4/go.mod h1:eJVxU6o+4G1PSczBr85xmyvSNYAKvAYgkub40YGomFM= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1130,8 +1073,6 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= @@ -1145,24 +1086,24 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.28.4 h1:8ZBrLjwosLl/NYgv1P7EQLqoO8MGQApnbgH8tu3BMzY= -k8s.io/api v0.28.4/go.mod h1:axWTGrY88s/5YE+JSt4uUi6NMM+gur1en2REMR7IRj0= -k8s.io/apiextensions-apiserver v0.28.4 h1:AZpKY/7wQ8n+ZYDtNHbAJBb+N4AXXJvyZx6ww6yAJvU= -k8s.io/apiextensions-apiserver 
v0.28.4/go.mod h1:pgQIZ1U8eJSMQcENew/0ShUTlePcSGFq6dxSxf2mwPM= -k8s.io/apimachinery v0.28.4 h1:zOSJe1mc+GxuMnFzD4Z/U1wst50X28ZNsn5bhgIIao8= -k8s.io/apimachinery v0.28.4/go.mod h1:wI37ncBvfAoswfq626yPTe6Bz1c22L7uaJ8dho83mgg= -k8s.io/apiserver v0.28.4 h1:BJXlaQbAU/RXYX2lRz+E1oPe3G3TKlozMMCZWu5GMgg= -k8s.io/apiserver v0.28.4/go.mod h1:Idq71oXugKZoVGUUL2wgBCTHbUR+FYTWa4rq9j4n23w= -k8s.io/cli-runtime v0.28.2 h1:64meB2fDj10/ThIMEJLO29a1oujSm0GQmKzh1RtA/uk= -k8s.io/cli-runtime v0.28.2/go.mod h1:bTpGOvpdsPtDKoyfG4EG041WIyFZLV9qq4rPlkyYfDA= -k8s.io/client-go v0.28.4 h1:Np5ocjlZcTrkyRJ3+T3PkXDpe4UpatQxj85+xjaD2wY= -k8s.io/client-go v0.28.4/go.mod h1:0VDZFpgoZfelyP5Wqu0/r/TRYcLYuJ2U1KEeoaPa1N4= -k8s.io/component-base v0.28.4 h1:c/iQLWPdUgI90O+T9TeECg8o7N3YJTiuz2sKxILYcYo= -k8s.io/component-base v0.28.4/go.mod h1:m9hR0uvqXDybiGL2nf/3Lf0MerAfQXzkfWhUY58JUbU= -k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= -k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= +k8s.io/api v0.29.0-rc.1 h1:dsv3X3/+3Fgwnaqw53Pa4sV8S9kQX7pXb+/lPXPiBFo= +k8s.io/api v0.29.0-rc.1/go.mod h1:BX6ZTejt0Sa30eXx46r9LPXKgsJWX2vlbNTu8QfOkCQ= +k8s.io/apiextensions-apiserver v0.29.0-rc.1 h1:qS9G1R2bSGifRx4WM6aI36V7Xs5kIJ1aOdjnU+Q7euc= +k8s.io/apiextensions-apiserver v0.29.0-rc.1/go.mod h1:tnV28e3fqQJ2cV6U02qxpcQRcdJHadWn92GjS1U7004= +k8s.io/apimachinery v0.29.0-rc.1 h1:ReoN5k+8AEn2sj8//kcWsPbCH0dmY0Axy34XNJz7CsA= +k8s.io/apimachinery v0.29.0-rc.1/go.mod h1:eVBxQ/cwiJxH58eK/jd/vAk4mrxmVlnpBH5J2GbMeis= +k8s.io/apiserver v0.29.0-rc.1 h1:5w+V5PxqZTCnk43Io9wiODMHCJvujPbrBNfvZxvrnbc= +k8s.io/apiserver v0.29.0-rc.1/go.mod h1:m+p0MCsTaH6s+lH6yC+zyzTFV6pKmJpfWogbj05CSI8= +k8s.io/cli-runtime v0.29.0-rc.1 h1:FDpo4x/OD0+1OmGjCZ2ZrAEvv7qRVhfy2gE4shRjISE= +k8s.io/cli-runtime v0.29.0-rc.1/go.mod h1:SnKOqYUpxCLiQS2rfLaD9fRDbqh6UMuMjdTkUYu8dck= +k8s.io/client-go v0.29.0-rc.1 h1:CsiZED5XzximZzU/vB0ph3wQ4kJWsZGaDX1V9LSbTdw= +k8s.io/client-go v0.29.0-rc.1/go.mod h1:PyVpVRI/sTHNqVnztGOu52YAPiBn6OqTjlumG5D3sZM= +k8s.io/component-base v0.29.0-rc.1 h1:YNXLyyB7x0lGqclAXmUbREnRiESVFPJrX0Nqz0+XHKw= +k8s.io/component-base v0.29.0-rc.1/go.mod h1:BxIwavzsdJarfgTSRgaQa7IbepGVNdFSiEsLGUaH4QA= +k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= +k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/kubectl v0.28.2 h1:fOWOtU6S0smdNjG1PB9WFbqEIMlkzU5ahyHkc7ESHgM= k8s.io/kubectl v0.28.2/go.mod h1:6EQWTPySF1fn7yKoQZHYf9TPwIl2AygHEcJoxFekr64= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= @@ -1178,7 +1119,7 @@ sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 h1:XX3Ajgzov2RKU sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3/go.mod h1:9n16EZKMhXBNSiUC5kSdFQJkdH3zbxS/JoO619G1VAY= sigs.k8s.io/kustomize/kyaml v0.14.4-0.20230814161922-911ddcda40a8 h1:BQ04Q39YoTY5v1haEHcU/8Zj392ZIIeHuvxzfDbWhSE= sigs.k8s.io/kustomize/kyaml v0.14.4-0.20230814161922-911ddcda40a8/go.mod h1:npvh9epWysfQ689Rtt/U+dpOJDTBn8kUnF1O6VzvmZA= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3 
h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/vendor/github.com/cilium/cilium/AUTHORS b/vendor/github.com/cilium/cilium/AUTHORS index d303bf182f..94a6426b67 100644 --- a/vendor/github.com/cilium/cilium/AUTHORS +++ b/vendor/github.com/cilium/cilium/AUTHORS @@ -13,7 +13,7 @@ Aditya Purandare aditya.p1993@hotmail.com Aditya Sharma aditya.sharma@shopify.com Adrien Trouillaud adrienjt@users.noreply.github.com Ahmed Bebars 1381372+abebars@users.noreply.github.com -Akhil Velagapudi avelagap@google.com +Akhil Velagapudi 4@4khil.com Akshat Agarwal humancalico@disroot.org Alan Kutniewski kutniewski@google.com Alban Crequy alban@kinvolk.io @@ -67,6 +67,7 @@ Anton Protopopov aspsk@isovalent.com Anton Tykhyy atykhyy@gmail.com Anurag Aggarwal anurag.aggarwal@flipkart.com Archana Shinde archana.m.shinde@intel.com +Ardika Bagus me@ardikabs.com Arika Chen eaglesora@gmail.com Arnaud Meukam ameukam@gmail.com Arseniy Belorukov a.belorukov@team.bumble.com @@ -126,6 +127,8 @@ chentanjun tanjunchen20@gmail.com chenyahui chenyahui9@jd.com Chen Yaqi chenyaqi01@baidu.com chenyuezhou zcy.chenyue.zhou@gmail.com +chez-shanpu tomoki-sugiura@cybozu.co.jp +Chris Bannister c.bannister@gmail.com Chris Tarazi chris@isovalent.com Christian Hörtnagl christian2@univie.ac.at Christian Hüning christian.huening@finleap.com @@ -167,6 +170,7 @@ Dario Mader maderdario@gmail.com Darren Foo darren.foo@shopify.com Darren Mackintosh unixdaddy@gmail.com Darshan Chaudhary deathbullet@gmail.com +DaShaun 826271+dashaun@users.noreply.github.com David Bimmler david.bimmler@isovalent.com David Birks davidebirks@gmail.com David Bouchare david.bouchare@datadoghq.com @@ -179,6 +183,7 @@ David Schlosnagle davids@palantir.com David Wolffberg 1350533+wolffberg@users.noreply.github.com Dawn lx1960753013@gmail.com dddddai dddwq@foxmail.com +Dean 22192242+saintdle@users.noreply.github.com Deepesha Burse deepesha.3007@gmail.com Deepesh Pathak deepshpathak@gmail.com Denis GERMAIN dgermain@deezer.com @@ -211,6 +216,7 @@ Dylan Reimerink dylan.reimerink@isovalent.com Ekene Nwobodo nwobodoe71@gmail.com Electron alokaks601@gmail.com El-Fadel Bonfoh elfadel@accuknox.com +eliranw 39266788+eliranw@users.noreply.github.com Ellie Springsteen ellie.springsteen@appian.com Eloy Coto eloy.coto@acalustra.com Emin Aktas eminaktas34@gmail.com @@ -257,6 +263,7 @@ GH action ghabot@does.not.exist.cilium.org Gianluca Arbezzano gianarb92@gmail.com Gilberto Bertin jibi@cilium.io gjmzj jmgaozz@hotmail.com +Glen Yu glen.yu@gmail.com Glib Smaga code@gsmaga.com Gobinath Krishnamoorthy gobinathk@google.com Gowtham Sundara gowtham.sundara@rapyuta-robotics.com @@ -282,9 +289,11 @@ Huagong Wang wanghuagong@kylinos.cn huangxuesen huangxuesen@kuaishou.com Hui Kong hui.kong@qunar.com Hunter Massey hmassey@tradestation.com +Husni Alhamdani dhanielluis@gmail.com hxysayhi 51870525+hxysayhi@users.noreply.github.com Ian Vernon ian@cilium.io Ifeanyi Ubah ify1992@yahoo.com +Iiqbal2000 
iqbalhafizh2000@gmail.com Ilya Dmitrichenko errordeveloper@gmail.com Ilya Shaisultanov ilya.shaisultanov@gmail.com Ioannis Androulidakis androulidakis.ioannis@gmail.com @@ -312,6 +321,7 @@ Jiang Wang jiang.wang@bytedance.com Jianlin Lv Jianlin.Lv@arm.com Jian Zeng anonymousknight96@gmail.com JieJhih Jhang jiejhihjhang@gmail.com +jignyasamishra iamjignyasa@gmail.com Jim Angel jimangel@google.com.com Jim Ntosas ntosas@gmail.com JinLin Fu withlin@apache.org @@ -327,6 +337,7 @@ Joey Espinosa jlouis.espinosa@gmail.com Johannes Liebermann johanan.liebermann@gmail.com John Fastabend john.fastabend@gmail.com John Gardiner Myers jgmyers@proofpoint.com +John Howard howardjohn@google.com John Watson johnw@planetscale.com John Zheng johnzhengaz@gmail.com Jomen Xiao jomenxiao@gmail.com @@ -338,6 +349,7 @@ Joseph-Irving joseph.irving500@gmail.com Joseph Sheng jiajun.sheng@microfocus.com Joseph Stevens thejosephstevens@gmail.com Joshua Roppo joshroppo@gmail.com +jshr-w shjayaraman@microsoft.com Juan Jimenez-Anca cortopy@users.noreply.github.com Juha Tiensyrjä juha.tiensyrja@ouraring.com Julian Wiedmann jwi@isovalent.com @@ -383,6 +395,7 @@ liuxu liuxu623@gmail.com Livingstone S E livingstone.s.e@gmail.com Li Yiheng lyhutopi@gmail.com Liz Rice liz@lizrice.com +log1cb0mb nabeelnrana@gmail.com LongHui Li longhui.li@woqutech.com Lorenz Bauer lmb@isovalent.com Lorenzo Fundaró lorenzofundaro@gmail.com @@ -461,6 +474,7 @@ Michal Siwinski siwy@google.com Michi Mutsuzaki michi@isovalent.com Mike Fedosin mfedosin@gmail.com MikeLing sabergeass@gmail.com +Mike Mwanje mwanjemike767@gmail.com Mitch Hulscher mitch.hulscher@lib.io Moh Ahmed moh.ahmed@cengn.ca Mohammad Yosefpor 47300215+m-yosefpor@users.noreply.github.com @@ -479,6 +493,7 @@ necatican necaticanyildirim@gmail.com Neela Jacques neela@isovalent.com Neil Seward neil.seward@elasticpath.com Neil Wilson neil@aldur.co.uk +Neutrollized glen.yu@gmail.com Nick M 4718+rkage@users.noreply.github.com Nick Young nick@isovalent.com Niclas Mietz solidnerd@users.noreply.github.com @@ -513,6 +528,7 @@ Parth Patel parth.psu@gmail.com Patrice Chalin chalin@cncf.io Patrice Peterson patrice.peterson@mailbox.org Patrick Mahoney pmahoney@greenkeytech.com +Patrick O’Brien patrick.obrien@thetradedesk.com Patrick Reich patrick@neodyme.io Pat Riehecky riehecky@fnal.gov Patrik Cyvoct patrik@ptrk.io @@ -556,6 +572,7 @@ Reilly Brogan reilly@reillybrogan.com Rei Shimizu Shikugawa@gmail.com Rémy Léone rleone@scaleway.com Renat Tuktarov yandzeek@gmail.com +Renaud Gaubert renaud@openai.com Rene Luria rene@luria.ch René Veenhuis re.veenhuis@gmail.com Rene Zbinden rene.zbinden@postfinance.ch @@ -563,7 +580,7 @@ Richard Lavoie richard.lavoie@logmein.com Richard Tweed RichardoC@users.noreply.github.com Ricky Ho horicky78@gmail.com Rio Kierkels riokierkels@gmail.com -Robin Gögge r.goegge@isovalent.com +Robin Gögge r.goegge@gmail.com Robin Hahling robin.hahling@gw-computing.net Rocky Chen 40374064+rockc2020@users.noreply.github.com Rodrigo Chacon rochacon@gmail.com @@ -654,7 +671,7 @@ Tobias Klauser tobias@cilium.io Tobias Kohlbau tobias@kohlbau.de Tobias Mose mosetobias@gmail.com Tom Hadlaw tom.hadlaw@isovalent.com -Tomoki Sugiura cheztomo513@gmail.com +Tomoki Sugiura tomoki-sugiura@cybozu.co.jp Tomoya Fujita Tomoya.Fujita@sony.com Tom Payne twpayne@gmail.com Tony Lambiris tony@criticalstack.com @@ -666,12 +683,13 @@ Travis Glenn Hansen travisghansen@yahoo.com Trevor Roberts Jr Trevor.Roberts.Jr@gmail.com Trevor Tao trevor.tao@arm.com Umesh Keerthy B S umesh.freelance@gmail.com +usiegl00 
50933431+usiegl00@users.noreply.github.com Vadim Ponomarev velizarx@gmail.com vakr vakr@microsoft.com Valas Valancius valas@google.com Vance Li vanceli@tencent.com Vigneshwaren Sunder vickymailed@gmail.com -viktor-kurchenko 69600804+viktor-kurchenko@users.noreply.github.com +viktor-kurchenko viktor.kurchenko@isovalent.com Viktor Kuzmin kvaster@gmail.com Viktor Oreshkin imselfish@stek29.rocks Ville Ojamo bluikko@users.noreply.github.com @@ -720,7 +738,7 @@ Yugo Kobayashi kobdotsh@gmail.com yulng wei.yang@daocloud.io Yurii Dzobak yurii.dzobak@lotusflare.com Yurii Komar Subreptivus@gmail.com -Yusuke Suzuki yusuke-suzuki@cybozu.co.jp +Yusuke Suzuki yusuke.suzuki@isovalent.com Yutaro Hayakawa yutaro.hayakawa@isovalent.com Yves Blusseau yves.blusseau@acoss.fr yylt yang8518296@163.com diff --git a/vendor/github.com/cilium/cilium/api/v1/client/bgp/bgp_client.go b/vendor/github.com/cilium/cilium/api/v1/client/bgp/bgp_client.go index 2713952d46..e7ee6fb081 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/bgp/bgp_client.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/bgp/bgp_client.go @@ -35,6 +35,8 @@ type ClientOption func(*runtime.ClientOperation) type ClientService interface { GetBgpPeers(params *GetBgpPeersParams, opts ...ClientOption) (*GetBgpPeersOK, error) + GetBgpRoutePolicies(params *GetBgpRoutePoliciesParams, opts ...ClientOption) (*GetBgpRoutePoliciesOK, error) + GetBgpRoutes(params *GetBgpRoutesParams, opts ...ClientOption) (*GetBgpRoutesOK, error) SetTransport(transport runtime.ClientTransport) @@ -83,6 +85,46 @@ func (a *Client) GetBgpPeers(params *GetBgpPeersParams, opts ...ClientOption) (* panic(msg) } +/* +GetBgpRoutePolicies lists b g p route policies configured in b g p control plane + +Retrieves route policies from BGP Control Plane. +*/ +func (a *Client) GetBgpRoutePolicies(params *GetBgpRoutePoliciesParams, opts ...ClientOption) (*GetBgpRoutePoliciesOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewGetBgpRoutePoliciesParams() + } + op := &runtime.ClientOperation{ + ID: "GetBgpRoutePolicies", + Method: "GET", + PathPattern: "/bgp/route-policies", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &GetBgpRoutePoliciesReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*GetBgpRoutePoliciesOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for GetBgpRoutePolicies: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + /* GetBgpRoutes lists b g p routes from b g p control plane r i b diff --git a/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_route_policies_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_route_policies_parameters.go new file mode 100644 index 0000000000..db2421c1a4 --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_route_policies_parameters.go @@ -0,0 +1,169 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+ +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package bgp + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewGetBgpRoutePoliciesParams creates a new GetBgpRoutePoliciesParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewGetBgpRoutePoliciesParams() *GetBgpRoutePoliciesParams { + return &GetBgpRoutePoliciesParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewGetBgpRoutePoliciesParamsWithTimeout creates a new GetBgpRoutePoliciesParams object +// with the ability to set a timeout on a request. +func NewGetBgpRoutePoliciesParamsWithTimeout(timeout time.Duration) *GetBgpRoutePoliciesParams { + return &GetBgpRoutePoliciesParams{ + timeout: timeout, + } +} + +// NewGetBgpRoutePoliciesParamsWithContext creates a new GetBgpRoutePoliciesParams object +// with the ability to set a context for a request. +func NewGetBgpRoutePoliciesParamsWithContext(ctx context.Context) *GetBgpRoutePoliciesParams { + return &GetBgpRoutePoliciesParams{ + Context: ctx, + } +} + +// NewGetBgpRoutePoliciesParamsWithHTTPClient creates a new GetBgpRoutePoliciesParams object +// with the ability to set a custom HTTPClient for a request. +func NewGetBgpRoutePoliciesParamsWithHTTPClient(client *http.Client) *GetBgpRoutePoliciesParams { + return &GetBgpRoutePoliciesParams{ + HTTPClient: client, + } +} + +/* +GetBgpRoutePoliciesParams contains all the parameters to send to the API endpoint + + for the get bgp route policies operation. + + Typically these are written to a http.Request. +*/ +type GetBgpRoutePoliciesParams struct { + + /* RouterAsn. + + Autonomous System Number (ASN) identifying a BGP virtual router instance. + If not specified, all virtual router instances are selected. + + */ + RouterAsn *int64 + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the get bgp route policies params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GetBgpRoutePoliciesParams) WithDefaults() *GetBgpRoutePoliciesParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the get bgp route policies params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *GetBgpRoutePoliciesParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the get bgp route policies params +func (o *GetBgpRoutePoliciesParams) WithTimeout(timeout time.Duration) *GetBgpRoutePoliciesParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the get bgp route policies params +func (o *GetBgpRoutePoliciesParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the get bgp route policies params +func (o *GetBgpRoutePoliciesParams) WithContext(ctx context.Context) *GetBgpRoutePoliciesParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the get bgp route policies params +func (o *GetBgpRoutePoliciesParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the get bgp route policies params +func (o *GetBgpRoutePoliciesParams) WithHTTPClient(client *http.Client) *GetBgpRoutePoliciesParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the get bgp route policies params +func (o *GetBgpRoutePoliciesParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithRouterAsn adds the routerAsn to the get bgp route policies params +func (o *GetBgpRoutePoliciesParams) WithRouterAsn(routerAsn *int64) *GetBgpRoutePoliciesParams { + o.SetRouterAsn(routerAsn) + return o +} + +// SetRouterAsn adds the routerAsn to the get bgp route policies params +func (o *GetBgpRoutePoliciesParams) SetRouterAsn(routerAsn *int64) { + o.RouterAsn = routerAsn +} + +// WriteToRequest writes these params to a swagger request +func (o *GetBgpRoutePoliciesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.RouterAsn != nil { + + // query param router_asn + var qrRouterAsn int64 + + if o.RouterAsn != nil { + qrRouterAsn = *o.RouterAsn + } + qRouterAsn := swag.FormatInt64(qrRouterAsn) + if qRouterAsn != "" { + + if err := r.SetQueryParam("router_asn", qRouterAsn); err != nil { + return err + } + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_route_policies_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_route_policies_responses.go new file mode 100644 index 0000000000..fe25bc9d63 --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_route_policies_responses.go @@ -0,0 +1,233 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package bgp + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/cilium/cilium/api/v1/models" +) + +// GetBgpRoutePoliciesReader is a Reader for the GetBgpRoutePolicies structure. +type GetBgpRoutePoliciesReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *GetBgpRoutePoliciesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewGetBgpRoutePoliciesOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 500: + result := NewGetBgpRoutePoliciesInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 501: + result := NewGetBgpRoutePoliciesDisabled() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewGetBgpRoutePoliciesOK creates a GetBgpRoutePoliciesOK with default headers values +func NewGetBgpRoutePoliciesOK() *GetBgpRoutePoliciesOK { + return &GetBgpRoutePoliciesOK{} +} + +/* +GetBgpRoutePoliciesOK describes a response with status code 200, with default header values. + +Success +*/ +type GetBgpRoutePoliciesOK struct { + Payload []*models.BgpRoutePolicy +} + +// IsSuccess returns true when this get bgp route policies o k response has a 2xx status code +func (o *GetBgpRoutePoliciesOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this get bgp route policies o k response has a 3xx status code +func (o *GetBgpRoutePoliciesOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get bgp route policies o k response has a 4xx status code +func (o *GetBgpRoutePoliciesOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this get bgp route policies o k response has a 5xx status code +func (o *GetBgpRoutePoliciesOK) IsServerError() bool { + return false +} + +// IsCode returns true when this get bgp route policies o k response a status code equal to that given +func (o *GetBgpRoutePoliciesOK) IsCode(code int) bool { + return code == 200 +} + +func (o *GetBgpRoutePoliciesOK) Error() string { + return fmt.Sprintf("[GET /bgp/route-policies][%d] getBgpRoutePoliciesOK %+v", 200, o.Payload) +} + +func (o *GetBgpRoutePoliciesOK) String() string { + return fmt.Sprintf("[GET /bgp/route-policies][%d] getBgpRoutePoliciesOK %+v", 200, o.Payload) +} + +func (o *GetBgpRoutePoliciesOK) GetPayload() []*models.BgpRoutePolicy { + return o.Payload +} + +func (o *GetBgpRoutePoliciesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetBgpRoutePoliciesInternalServerError creates a GetBgpRoutePoliciesInternalServerError with default headers values +func NewGetBgpRoutePoliciesInternalServerError() *GetBgpRoutePoliciesInternalServerError { + return &GetBgpRoutePoliciesInternalServerError{} +} + +/* +GetBgpRoutePoliciesInternalServerError describes a response with status code 500, with default header values. 
+ +Internal Server Error +*/ +type GetBgpRoutePoliciesInternalServerError struct { + Payload models.Error +} + +// IsSuccess returns true when this get bgp route policies internal server error response has a 2xx status code +func (o *GetBgpRoutePoliciesInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get bgp route policies internal server error response has a 3xx status code +func (o *GetBgpRoutePoliciesInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get bgp route policies internal server error response has a 4xx status code +func (o *GetBgpRoutePoliciesInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this get bgp route policies internal server error response has a 5xx status code +func (o *GetBgpRoutePoliciesInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this get bgp route policies internal server error response a status code equal to that given +func (o *GetBgpRoutePoliciesInternalServerError) IsCode(code int) bool { + return code == 500 +} + +func (o *GetBgpRoutePoliciesInternalServerError) Error() string { + return fmt.Sprintf("[GET /bgp/route-policies][%d] getBgpRoutePoliciesInternalServerError %+v", 500, o.Payload) +} + +func (o *GetBgpRoutePoliciesInternalServerError) String() string { + return fmt.Sprintf("[GET /bgp/route-policies][%d] getBgpRoutePoliciesInternalServerError %+v", 500, o.Payload) +} + +func (o *GetBgpRoutePoliciesInternalServerError) GetPayload() models.Error { + return o.Payload +} + +func (o *GetBgpRoutePoliciesInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetBgpRoutePoliciesDisabled creates a GetBgpRoutePoliciesDisabled with default headers values +func NewGetBgpRoutePoliciesDisabled() *GetBgpRoutePoliciesDisabled { + return &GetBgpRoutePoliciesDisabled{} +} + +/* +GetBgpRoutePoliciesDisabled describes a response with status code 501, with default header values. 
+ +BGP Control Plane disabled +*/ +type GetBgpRoutePoliciesDisabled struct { + Payload models.Error +} + +// IsSuccess returns true when this get bgp route policies disabled response has a 2xx status code +func (o *GetBgpRoutePoliciesDisabled) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get bgp route policies disabled response has a 3xx status code +func (o *GetBgpRoutePoliciesDisabled) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get bgp route policies disabled response has a 4xx status code +func (o *GetBgpRoutePoliciesDisabled) IsClientError() bool { + return false +} + +// IsServerError returns true when this get bgp route policies disabled response has a 5xx status code +func (o *GetBgpRoutePoliciesDisabled) IsServerError() bool { + return true +} + +// IsCode returns true when this get bgp route policies disabled response a status code equal to that given +func (o *GetBgpRoutePoliciesDisabled) IsCode(code int) bool { + return code == 501 +} + +func (o *GetBgpRoutePoliciesDisabled) Error() string { + return fmt.Sprintf("[GET /bgp/route-policies][%d] getBgpRoutePoliciesDisabled %+v", 501, o.Payload) +} + +func (o *GetBgpRoutePoliciesDisabled) String() string { + return fmt.Sprintf("[GET /bgp/route-policies][%d] getBgpRoutePoliciesDisabled %+v", 501, o.Payload) +} + +func (o *GetBgpRoutePoliciesDisabled) GetPayload() models.Error { + return o.Payload +} + +func (o *GetBgpRoutePoliciesDisabled) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/statedb/get_statedb_query_table_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/statedb/get_statedb_query_table_parameters.go new file mode 100644 index 0000000000..b936411eaa --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/client/statedb/get_statedb_query_table_parameters.go @@ -0,0 +1,236 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package statedb + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewGetStatedbQueryTableParams creates a new GetStatedbQueryTableParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewGetStatedbQueryTableParams() *GetStatedbQueryTableParams { + return &GetStatedbQueryTableParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewGetStatedbQueryTableParamsWithTimeout creates a new GetStatedbQueryTableParams object +// with the ability to set a timeout on a request. +func NewGetStatedbQueryTableParamsWithTimeout(timeout time.Duration) *GetStatedbQueryTableParams { + return &GetStatedbQueryTableParams{ + timeout: timeout, + } +} + +// NewGetStatedbQueryTableParamsWithContext creates a new GetStatedbQueryTableParams object +// with the ability to set a context for a request. 
+func NewGetStatedbQueryTableParamsWithContext(ctx context.Context) *GetStatedbQueryTableParams { + return &GetStatedbQueryTableParams{ + Context: ctx, + } +} + +// NewGetStatedbQueryTableParamsWithHTTPClient creates a new GetStatedbQueryTableParams object +// with the ability to set a custom HTTPClient for a request. +func NewGetStatedbQueryTableParamsWithHTTPClient(client *http.Client) *GetStatedbQueryTableParams { + return &GetStatedbQueryTableParams{ + HTTPClient: client, + } +} + +/* +GetStatedbQueryTableParams contains all the parameters to send to the API endpoint + + for the get statedb query table operation. + + Typically these are written to a http.Request. +*/ +type GetStatedbQueryTableParams struct { + + /* Index. + + StateDB index name + */ + Index string + + /* Key. + + Query key (base64 encoded) + */ + Key string + + /* Lowerbound. + + If true perform a LowerBound search + */ + Lowerbound bool + + /* Table. + + StateDB table name + */ + Table string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the get statedb query table params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GetStatedbQueryTableParams) WithDefaults() *GetStatedbQueryTableParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the get statedb query table params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GetStatedbQueryTableParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the get statedb query table params +func (o *GetStatedbQueryTableParams) WithTimeout(timeout time.Duration) *GetStatedbQueryTableParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the get statedb query table params +func (o *GetStatedbQueryTableParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the get statedb query table params +func (o *GetStatedbQueryTableParams) WithContext(ctx context.Context) *GetStatedbQueryTableParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the get statedb query table params +func (o *GetStatedbQueryTableParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the get statedb query table params +func (o *GetStatedbQueryTableParams) WithHTTPClient(client *http.Client) *GetStatedbQueryTableParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the get statedb query table params +func (o *GetStatedbQueryTableParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithIndex adds the index to the get statedb query table params +func (o *GetStatedbQueryTableParams) WithIndex(index string) *GetStatedbQueryTableParams { + o.SetIndex(index) + return o +} + +// SetIndex adds the index to the get statedb query table params +func (o *GetStatedbQueryTableParams) SetIndex(index string) { + o.Index = index +} + +// WithKey adds the key to the get statedb query table params +func (o *GetStatedbQueryTableParams) WithKey(key string) *GetStatedbQueryTableParams { + o.SetKey(key) + return o +} + +// SetKey adds the key to the get statedb query table params +func (o *GetStatedbQueryTableParams) SetKey(key string) { + o.Key = key +} + +// WithLowerbound adds the lowerbound to the get statedb query table params +func (o 
*GetStatedbQueryTableParams) WithLowerbound(lowerbound bool) *GetStatedbQueryTableParams { + o.SetLowerbound(lowerbound) + return o +} + +// SetLowerbound adds the lowerbound to the get statedb query table params +func (o *GetStatedbQueryTableParams) SetLowerbound(lowerbound bool) { + o.Lowerbound = lowerbound +} + +// WithTable adds the table to the get statedb query table params +func (o *GetStatedbQueryTableParams) WithTable(table string) *GetStatedbQueryTableParams { + o.SetTable(table) + return o +} + +// SetTable adds the table to the get statedb query table params +func (o *GetStatedbQueryTableParams) SetTable(table string) { + o.Table = table +} + +// WriteToRequest writes these params to a swagger request +func (o *GetStatedbQueryTableParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param index + qrIndex := o.Index + qIndex := qrIndex + if qIndex != "" { + + if err := r.SetQueryParam("index", qIndex); err != nil { + return err + } + } + + // query param key + qrKey := o.Key + qKey := qrKey + if qKey != "" { + + if err := r.SetQueryParam("key", qKey); err != nil { + return err + } + } + + // query param lowerbound + qrLowerbound := o.Lowerbound + qLowerbound := swag.FormatBool(qrLowerbound) + if qLowerbound != "" { + + if err := r.SetQueryParam("lowerbound", qLowerbound); err != nil { + return err + } + } + + // path param table + if err := r.SetPathParam("table", o.Table); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/statedb/get_statedb_query_table_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/statedb/get_statedb_query_table_responses.go new file mode 100644 index 0000000000..74bfe66f7f --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/client/statedb/get_statedb_query_table_responses.go @@ -0,0 +1,227 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package statedb + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/cilium/cilium/api/v1/models" +) + +// GetStatedbQueryTableReader is a Reader for the GetStatedbQueryTable structure. +type GetStatedbQueryTableReader struct { + formats strfmt.Registry + writer io.Writer +} + +// ReadResponse reads a server response into the received o. 
+func (o *GetStatedbQueryTableReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewGetStatedbQueryTableOK(o.writer) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewGetStatedbQueryTableBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewGetStatedbQueryTableNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + } +} + +// NewGetStatedbQueryTableOK creates a GetStatedbQueryTableOK with default headers values +func NewGetStatedbQueryTableOK(writer io.Writer) *GetStatedbQueryTableOK { + return &GetStatedbQueryTableOK{ + + Payload: writer, + } +} + +/* +GetStatedbQueryTableOK describes a response with status code 200, with default header values. + +Success +*/ +type GetStatedbQueryTableOK struct { + Payload io.Writer +} + +// IsSuccess returns true when this get statedb query table o k response has a 2xx status code +func (o *GetStatedbQueryTableOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this get statedb query table o k response has a 3xx status code +func (o *GetStatedbQueryTableOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get statedb query table o k response has a 4xx status code +func (o *GetStatedbQueryTableOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this get statedb query table o k response has a 5xx status code +func (o *GetStatedbQueryTableOK) IsServerError() bool { + return false +} + +// IsCode returns true when this get statedb query table o k response a status code equal to that given +func (o *GetStatedbQueryTableOK) IsCode(code int) bool { + return code == 200 +} + +func (o *GetStatedbQueryTableOK) Error() string { + return fmt.Sprintf("[GET /statedb/query/{table}][%d] getStatedbQueryTableOK %+v", 200, o.Payload) +} + +func (o *GetStatedbQueryTableOK) String() string { + return fmt.Sprintf("[GET /statedb/query/{table}][%d] getStatedbQueryTableOK %+v", 200, o.Payload) +} + +func (o *GetStatedbQueryTableOK) GetPayload() io.Writer { + return o.Payload +} + +func (o *GetStatedbQueryTableOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetStatedbQueryTableBadRequest creates a GetStatedbQueryTableBadRequest with default headers values +func NewGetStatedbQueryTableBadRequest() *GetStatedbQueryTableBadRequest { + return &GetStatedbQueryTableBadRequest{} +} + +/* +GetStatedbQueryTableBadRequest describes a response with status code 400, with default header values. 
+ +Invalid parameters +*/ +type GetStatedbQueryTableBadRequest struct { + Payload models.Error +} + +// IsSuccess returns true when this get statedb query table bad request response has a 2xx status code +func (o *GetStatedbQueryTableBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get statedb query table bad request response has a 3xx status code +func (o *GetStatedbQueryTableBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get statedb query table bad request response has a 4xx status code +func (o *GetStatedbQueryTableBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this get statedb query table bad request response has a 5xx status code +func (o *GetStatedbQueryTableBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this get statedb query table bad request response a status code equal to that given +func (o *GetStatedbQueryTableBadRequest) IsCode(code int) bool { + return code == 400 +} + +func (o *GetStatedbQueryTableBadRequest) Error() string { + return fmt.Sprintf("[GET /statedb/query/{table}][%d] getStatedbQueryTableBadRequest %+v", 400, o.Payload) +} + +func (o *GetStatedbQueryTableBadRequest) String() string { + return fmt.Sprintf("[GET /statedb/query/{table}][%d] getStatedbQueryTableBadRequest %+v", 400, o.Payload) +} + +func (o *GetStatedbQueryTableBadRequest) GetPayload() models.Error { + return o.Payload +} + +func (o *GetStatedbQueryTableBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetStatedbQueryTableNotFound creates a GetStatedbQueryTableNotFound with default headers values +func NewGetStatedbQueryTableNotFound() *GetStatedbQueryTableNotFound { + return &GetStatedbQueryTableNotFound{} +} + +/* +GetStatedbQueryTableNotFound describes a response with status code 404, with default header values. 
+ +Table or Index not found +*/ +type GetStatedbQueryTableNotFound struct { +} + +// IsSuccess returns true when this get statedb query table not found response has a 2xx status code +func (o *GetStatedbQueryTableNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get statedb query table not found response has a 3xx status code +func (o *GetStatedbQueryTableNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get statedb query table not found response has a 4xx status code +func (o *GetStatedbQueryTableNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this get statedb query table not found response has a 5xx status code +func (o *GetStatedbQueryTableNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this get statedb query table not found response a status code equal to that given +func (o *GetStatedbQueryTableNotFound) IsCode(code int) bool { + return code == 404 +} + +func (o *GetStatedbQueryTableNotFound) Error() string { + return fmt.Sprintf("[GET /statedb/query/{table}][%d] getStatedbQueryTableNotFound ", 404) +} + +func (o *GetStatedbQueryTableNotFound) String() string { + return fmt.Sprintf("[GET /statedb/query/{table}][%d] getStatedbQueryTableNotFound ", 404) +} + +func (o *GetStatedbQueryTableNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/statedb/statedb_client.go b/vendor/github.com/cilium/cilium/api/v1/client/statedb/statedb_client.go index 69365c913f..4009fff8ed 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/statedb/statedb_client.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/statedb/statedb_client.go @@ -36,6 +36,8 @@ type ClientOption func(*runtime.ClientOperation) type ClientService interface { GetStatedbDump(params *GetStatedbDumpParams, writer io.Writer, opts ...ClientOption) (*GetStatedbDumpOK, error) + GetStatedbQueryTable(params *GetStatedbQueryTableParams, writer io.Writer, opts ...ClientOption) (*GetStatedbQueryTableOK, error) + SetTransport(transport runtime.ClientTransport) } @@ -77,6 +79,44 @@ func (a *Client) GetStatedbDump(params *GetStatedbDumpParams, writer io.Writer, panic(msg) } +/* +GetStatedbQueryTable performs a query against a state d b table +*/ +func (a *Client) GetStatedbQueryTable(params *GetStatedbQueryTableParams, writer io.Writer, opts ...ClientOption) (*GetStatedbQueryTableOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewGetStatedbQueryTableParams() + } + op := &runtime.ClientOperation{ + ID: "GetStatedbQueryTable", + Method: "GET", + PathPattern: "/statedb/query/{table}", + ProducesMediaTypes: []string{"application/octet-stream"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &GetStatedbQueryTableReader{formats: a.formats, writer: writer}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*GetStatedbQueryTableOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for GetStatedbQueryTable: 
API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + // SetTransport changes the transport on the client func (a *Client) SetTransport(transport runtime.ClientTransport) { a.transport = transport diff --git a/vendor/github.com/cilium/cilium/api/v1/flow/README.md b/vendor/github.com/cilium/cilium/api/v1/flow/README.md index bcdfaa0b52..654751ef37 100644 --- a/vendor/github.com/cilium/cilium/api/v1/flow/README.md +++ b/vendor/github.com/cilium/cilium/api/v1/flow/README.md @@ -316,7 +316,7 @@ multiple fields are set, then all fields must match for the filter to match. | ----- | ---- | ----- | ----------- | | uuid | [string](#string) | repeated | uuid filters by a list of flow uuids. | | source_ip | [string](#string) | repeated | source_ip filters by a list of source ips. Each of the source ips can be specified as an exact match (e.g. "1.1.1.1") or as a CIDR range (e.g. "1.1.1.0/24"). | -| source_pod | [string](#string) | repeated | source_pod filters by a list of source pod name prefixes, optionally within a given namespace (e.g. "xwing", "kube-system/coredns-"). The pod name can be omitted to only filter by namespace (e.g. "kube-system/") | +| source_pod | [string](#string) | repeated | source_pod filters by a list of source pod name prefixes, optionally within a given namespace (e.g. "xwing", "kube-system/coredns-"). The pod name can be omitted to only filter by namespace (e.g. "kube-system/") or the namespace can be omitted to filter for pods in any namespace (e.g. "/xwing") | | source_fqdn | [string](#string) | repeated | source_fqdn filters by a list of source fully qualified domain names | | source_label | [string](#string) | repeated | source_labels filters on a list of source label selectors. Selectors support the full Kubernetes label selector syntax. | | source_service | [string](#string) | repeated | source_service filters on a list of source service names. This field supports the same syntax as the source_pod field. | @@ -341,6 +341,7 @@ multiple fields are set, then all fields must match for the filter to match. | http_method | [string](#string) | repeated | GET, POST, PUT, etc. methods. This type of field is well suited for an enum but every single existing place is using a string already. | | http_path | [string](#string) | repeated | http_path is a list of regular expressions to filter on the HTTP path. | | http_url | [string](#string) | repeated | http_url is a list of regular expressions to filter on the HTTP URL. | +| http_header | [HTTPHeader](#flow-HTTPHeader) | repeated | http_header is a list of key:value pairs to filter on the HTTP headers. | | tcp_flags | [TCPFlags](#flow-TCPFlags) | repeated | tcp_flags filters flows based on TCP header flags | | node_name | [string](#string) | repeated | node_name is a list of patterns to filter on the node name, e.g. "k8s*", "test-cluster/*.domain.com", "cluster-name/" etc. | | ip_version | [IPVersion](#flow-IPVersion) | repeated | filter based on IP version (ipv4 or ipv6) | @@ -1000,6 +1001,7 @@ here. 
| UNENCRYPTED_TRAFFIC | 195 | | | TTL_EXCEEDED | 196 | | | NO_NODE_ID | 197 | | +| DROP_RATE_LIMITED | 198 | | diff --git a/vendor/github.com/cilium/cilium/api/v1/flow/flow.pb.go b/vendor/github.com/cilium/cilium/api/v1/flow/flow.pb.go index c91e31a569..4656fc828b 100644 --- a/vendor/github.com/cilium/cilium/api/v1/flow/flow.pb.go +++ b/vendor/github.com/cilium/cilium/api/v1/flow/flow.pb.go @@ -488,6 +488,7 @@ const ( DropReason_UNENCRYPTED_TRAFFIC DropReason = 195 DropReason_TTL_EXCEEDED DropReason = 196 DropReason_NO_NODE_ID DropReason = 197 + DropReason_DROP_RATE_LIMITED DropReason = 198 ) // Enum value maps for DropReason. @@ -561,6 +562,7 @@ var ( 195: "UNENCRYPTED_TRAFFIC", 196: "TTL_EXCEEDED", 197: "NO_NODE_ID", + 198: "DROP_RATE_LIMITED", } DropReason_value = map[string]int32{ "DROP_REASON_UNKNOWN": 0, @@ -631,6 +633,7 @@ var ( "UNENCRYPTED_TRAFFIC": 195, "TTL_EXCEEDED": 196, "NO_NODE_ID": 197, + "DROP_RATE_LIMITED": 198, } ) @@ -2863,7 +2866,8 @@ type FlowFilter struct { // source_pod filters by a list of source pod name prefixes, optionally // within a given namespace (e.g. "xwing", "kube-system/coredns-"). // The pod name can be omitted to only filter by namespace - // (e.g. "kube-system/") + // (e.g. "kube-system/") or the namespace can be omitted to filter for + // pods in any namespace (e.g. "/xwing") SourcePod []string `protobuf:"bytes,2,rep,name=source_pod,json=sourcePod,proto3" json:"source_pod,omitempty"` // source_fqdn filters by a list of source fully qualified domain names SourceFqdn []string `protobuf:"bytes,7,rep,name=source_fqdn,json=sourceFqdn,proto3" json:"source_fqdn,omitempty"` @@ -2920,6 +2924,8 @@ type FlowFilter struct { HttpPath []string `protobuf:"bytes,22,rep,name=http_path,json=httpPath,proto3" json:"http_path,omitempty"` // http_url is a list of regular expressions to filter on the HTTP URL. HttpUrl []string `protobuf:"bytes,31,rep,name=http_url,json=httpUrl,proto3" json:"http_url,omitempty"` + // http_header is a list of key:value pairs to filter on the HTTP headers. + HttpHeader []*HTTPHeader `protobuf:"bytes,32,rep,name=http_header,json=httpHeader,proto3" json:"http_header,omitempty"` // tcp_flags filters flows based on TCP header flags TcpFlags []*TCPFlags `protobuf:"bytes,23,rep,name=tcp_flags,json=tcpFlags,proto3" json:"tcp_flags,omitempty"` // node_name is a list of patterns to filter on the node name, e.g. 
"k8s*", @@ -3152,6 +3158,13 @@ func (x *FlowFilter) GetHttpUrl() []string { return nil } +func (x *FlowFilter) GetHttpHeader() []*HTTPHeader { + if x != nil { + return x.HttpHeader + } + return nil +} + func (x *FlowFilter) GetTcpFlags() []*TCPFlags { if x != nil { return x.TcpFlags @@ -4823,7 +4836,7 @@ var file_flow_flow_proto_rawDesc = []byte{ 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x75, 0x62, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x07, 0x73, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, 0x22, 0xcf, 0x09, 0x0a, 0x0a, 0x46, 0x6c, + 0x52, 0x07, 0x73, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, 0x22, 0x82, 0x0a, 0x0a, 0x0a, 0x46, 0x6c, 0x6f, 0x77, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x1d, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x70, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, @@ -4891,551 +4904,556 @@ var file_flow_flow_proto_rawDesc = []byte{ 0x0a, 0x09, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x16, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x68, 0x74, 0x74, 0x70, 0x50, 0x61, 0x74, 0x68, 0x12, 0x19, 0x0a, 0x08, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x1f, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x68, - 0x74, 0x74, 0x70, 0x55, 0x72, 0x6c, 0x12, 0x2b, 0x0a, 0x09, 0x74, 0x63, 0x70, 0x5f, 0x66, 0x6c, - 0x61, 0x67, 0x73, 0x18, 0x17, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, - 0x2e, 0x54, 0x43, 0x50, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x52, 0x08, 0x74, 0x63, 0x70, 0x46, 0x6c, - 0x61, 0x67, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x18, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x2e, 0x0a, 0x0a, 0x69, 0x70, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x19, - 0x20, 0x03, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x49, 0x50, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x69, 0x70, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x12, 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x1c, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x22, 0xce, 0x01, 0x0a, 0x03, - 0x44, 0x4e, 0x53, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x70, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x03, 0x69, 0x70, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x74, - 0x74, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x16, 0x0a, - 0x06, 0x63, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x63, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x2d, 0x0a, 0x12, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x11, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x05, 0x72, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x71, 0x74, - 0x79, 0x70, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x71, 0x74, 0x79, 0x70, - 0x65, 0x73, 
0x12, 0x18, 0x0a, 0x07, 0x72, 0x72, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x08, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x72, 0x74, 0x79, 0x70, 0x65, 0x73, 0x22, 0x34, 0x0a, 0x0a, - 0x48, 0x54, 0x54, 0x50, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x22, 0x8c, 0x01, 0x0a, 0x04, 0x48, 0x54, 0x54, 0x50, 0x12, 0x12, 0x0a, 0x04, 0x63, - 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, - 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x2a, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, - 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x48, 0x54, - 0x54, 0x50, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x73, 0x22, 0x9d, 0x01, 0x0a, 0x05, 0x4b, 0x61, 0x66, 0x6b, 0x61, 0x12, 0x1d, 0x0a, 0x0a, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x70, - 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x07, 0x61, - 0x70, 0x69, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x61, 0x70, - 0x69, 0x4b, 0x65, 0x79, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x6f, 0x72, 0x72, 0x65, 0x6c, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x63, 0x6f, - 0x72, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, - 0x6f, 0x70, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, - 0x63, 0x22, 0x3b, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x91, - 0x01, 0x0a, 0x09, 0x4c, 0x6f, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x2d, 0x0a, 0x06, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x66, - 0x6c, 0x6f, 0x77, 0x2e, 0x4c, 0x6f, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x6e, - 0x75, 0x6d, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x5f, 0x6c, 0x6f, 0x73, 0x74, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6e, 0x75, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x4c, - 0x6f, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x03, 0x63, 0x70, 0x75, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 
0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x03, 0x63, - 0x70, 0x75, 0x22, 0xf6, 0x04, 0x0a, 0x0a, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x12, 0x28, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x14, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x75, - 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x18, 0x64, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x66, - 0x6c, 0x6f, 0x77, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x55, 0x6e, - 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x48, 0x00, 0x52, 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, - 0x12, 0x39, 0x0a, 0x0b, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, - 0x65, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x69, 0x6d, + 0x74, 0x74, 0x70, 0x55, 0x72, 0x6c, 0x12, 0x31, 0x0a, 0x0b, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x20, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x66, 0x6c, + 0x6f, 0x77, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0a, 0x68, + 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x09, 0x74, 0x63, 0x70, + 0x5f, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x17, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x66, + 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x43, 0x50, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x52, 0x08, 0x74, 0x63, + 0x70, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x18, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x0a, 0x69, 0x70, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x19, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x49, + 0x50, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x69, 0x70, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, + 0x1c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x22, 0xce, + 0x01, 0x0a, 0x03, 0x44, 0x4e, 0x53, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x69, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x03, 0x69, 0x70, 0x73, 0x12, 0x10, + 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x74, 0x74, 0x6c, + 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x06, 0x63, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x2d, 0x0a, 0x12, 0x6f, 0x62, 0x73, 0x65, + 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x63, 0x6f, 0x64, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x72, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x71, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x71, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x72, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x72, 0x74, 0x79, 0x70, 0x65, 0x73, 0x22, + 0x34, 0x0a, 0x0a, 0x48, 0x54, 0x54, 0x50, 0x48, 0x65, 0x61, 
0x64, 0x65, 0x72, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x8c, 0x01, 0x0a, 0x04, 0x48, 0x54, 0x54, 0x50, 0x12, 0x12, + 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, + 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, + 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x1a, 0x0a, 0x08, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x2a, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x66, 0x6c, 0x6f, 0x77, + 0x2e, 0x48, 0x54, 0x54, 0x50, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x73, 0x22, 0x9d, 0x01, 0x0a, 0x05, 0x4b, 0x61, 0x66, 0x6b, 0x61, 0x12, 0x1d, + 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x1f, 0x0a, + 0x0b, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x17, + 0x0a, 0x07, 0x61, 0x70, 0x69, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x61, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x6f, 0x72, 0x72, 0x65, + 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x0d, 0x63, 0x6f, 0x72, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x14, + 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, + 0x6f, 0x70, 0x69, 0x63, 0x22, 0x3b, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x22, 0x91, 0x01, 0x0a, 0x09, 0x4c, 0x6f, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, + 0x2d, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x15, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x4c, 0x6f, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x26, + 0x0a, 0x0f, 0x6e, 0x75, 0x6d, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x5f, 0x6c, 0x6f, 0x73, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6e, 0x75, 0x6d, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x73, 0x4c, 0x6f, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x03, 0x63, 0x70, 0x75, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x52, 0x03, 0x63, 0x70, 0x75, 0x22, 0xf6, 0x04, 0x0a, 0x0a, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 
0x20, 0x01, + 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x33, + 0x0a, 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x18, 0x64, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x48, 0x00, 0x52, 0x07, 0x75, 0x6e, 0x6b, 0x6e, + 0x6f, 0x77, 0x6e, 0x12, 0x39, 0x0a, 0x0b, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x18, 0x65, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x48, 0x00, 0x52, 0x0a, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x45, + 0x0a, 0x0d, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, + 0x66, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0c, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x52, 0x0a, 0x13, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x5f, 0x72, 0x65, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x18, 0x67, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x52, 0x65, 0x67, 0x65, 0x6e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x12, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, + 0x65, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x12, 0x4b, 0x0a, 0x0f, 0x65, 0x6e, 0x64, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x68, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x42, 0x0a, 0x0e, 0x69, 0x70, 0x63, 0x61, 0x63, 0x68, + 0x65, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x69, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x49, 0x50, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4e, 0x6f, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0d, 0x69, 0x70, 0x63, + 0x61, 0x63, 0x68, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x48, 0x0a, 0x0e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x75, 0x70, 0x73, 0x65, 0x72, 0x74, 0x18, 0x6a, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, + 0x73, 0x65, 0x72, 0x74, 0x12, 0x48, 0x0a, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x6b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x66, + 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, - 0x0a, 0x61, 
0x67, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x70, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x66, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0c, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x12, 0x52, 0x0a, 0x13, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x72, - 0x65, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x18, 0x67, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1f, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, - 0x65, 0x67, 0x65, 0x6e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x48, 0x00, 0x52, 0x12, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x12, 0x4b, 0x0a, 0x0f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, - 0x6e, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x68, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x20, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x55, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x48, 0x00, 0x52, 0x0e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x12, 0x42, 0x0a, 0x0e, 0x69, 0x70, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x75, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x69, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x66, 0x6c, - 0x6f, 0x77, 0x2e, 0x49, 0x50, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0d, 0x69, 0x70, 0x63, 0x61, 0x63, 0x68, - 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x48, 0x0a, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x5f, 0x75, 0x70, 0x73, 0x65, 0x72, 0x74, 0x18, 0x6a, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1f, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, - 0x73, 0x65, 0x72, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x48, 0x00, 0x52, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x73, 0x65, 0x72, - 0x74, 0x12, 0x48, 0x0a, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x18, 0x6b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x66, 0x6c, 0x6f, 0x77, - 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0d, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x6e, - 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4b, 0x0a, 0x11, 0x41, - 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, - 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x74, 0x79, 0x70, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6e, 0x6f, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x42, 0x0a, 0x10, 0x54, 0x69, 0x6d, 0x65, - 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x0a, 0x04, - 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x22, 0x6d, 0x0a, 0x18, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, - 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, - 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x04, 0x52, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, - 0x72, 0x75, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x09, 0x72, 0x75, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x59, 0x0a, 0x19, 0x45, - 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x65, 0x6e, 0x4e, 0x6f, 0x74, 0x69, + 0x0d, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x0e, + 0x0a, 0x0c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4b, + 0x0a, 0x11, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x55, 0x6e, 0x6b, 0x6e, + 0x6f, 0x77, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x6e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x42, 0x0a, 0x10, 0x54, + 0x69, 0x6d, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x2e, 0x0a, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x22, + 0x6d, 0x0a, 0x18, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x1d, 0x0a, 0x0a, 0x72, 0x75, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x75, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x59, + 0x0a, 0x19, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x65, 0x6e, 0x4e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x93, 0x01, 0x0a, 0x1a, 0x45, 0x6e, + 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 
0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x93, 0x01, 0x0a, 0x1a, 0x45, 0x6e, 0x64, 0x70, 0x6f, - 0x69, 0x6e, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x14, 0x0a, - 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x70, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, - 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x99, 0x02, 0x0a, - 0x13, 0x49, 0x50, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x69, 0x64, 0x72, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x63, 0x69, 0x64, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x69, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x79, 0x12, 0x3f, 0x0a, 0x0c, 0x6f, 0x6c, 0x64, 0x5f, 0x69, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, - 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x6f, 0x6c, 0x64, 0x49, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x17, 0x0a, 0x07, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x69, 0x70, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x68, 0x6f, 0x73, 0x74, 0x49, 0x70, 0x12, 0x1e, - 0x0a, 0x0b, 0x6f, 0x6c, 0x64, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x69, 0x70, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x6f, 0x6c, 0x64, 0x48, 0x6f, 0x73, 0x74, 0x49, 0x70, 0x12, 0x1f, - 0x0a, 0x0b, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x4b, 0x65, 0x79, 0x12, - 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x19, 0x0a, - 0x08, 0x70, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x70, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x43, 0x0a, 0x1d, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x70, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, - 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x22, 0x9a, 0x03, - 0x0a, 0x19, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 
0x4e, - 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x02, 0x69, 0x64, 0x12, 0x4e, 0x0a, 0x10, 0x66, - 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x72, 0x52, 0x0f, 0x66, 0x72, 0x6f, 0x6e, - 0x74, 0x65, 0x6e, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x50, 0x0a, 0x11, 0x62, - 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, - 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x72, 0x52, 0x10, 0x62, 0x61, 0x63, - 0x6b, 0x65, 0x6e, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x12, 0x0a, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, - 0x65, 0x12, 0x29, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, 0x70, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0d, 0x74, - 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x2c, - 0x0a, 0x12, 0x65, 0x78, 0x74, 0x5f, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, 0x70, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x65, 0x78, 0x74, 0x54, - 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2c, 0x0a, 0x12, - 0x69, 0x6e, 0x74, 0x5f, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, 0x70, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x69, 0x6e, 0x74, 0x54, 0x72, 0x61, - 0x66, 0x66, 0x69, 0x63, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x2b, 0x0a, 0x19, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x02, 0x69, 0x64, 0x22, 0x3c, 0x0a, 0x10, 0x4e, 0x65, 0x74, 0x77, 0x6f, - 0x72, 0x6b, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x69, - 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, - 0x78, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xef, 0x02, 0x0a, 0x0a, 0x44, 0x65, 0x62, 0x75, 0x67, 0x45, - 0x76, 0x65, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x45, - 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x26, - 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, - 0x2e, 0x66, 0x6c, 
0x6f, 0x77, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x06, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x30, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x12, 0x30, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x31, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04, 0x61, 0x72, 0x67, 0x31, 0x12, 0x30, 0x0a, 0x04, 0x61, 0x72, - 0x67, 0x32, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, - 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04, 0x61, 0x72, 0x67, 0x32, 0x12, 0x30, 0x0a, 0x04, - 0x61, 0x72, 0x67, 0x33, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, - 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04, 0x61, 0x72, 0x67, 0x33, 0x12, 0x18, - 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2d, 0x0a, 0x03, 0x63, 0x70, 0x75, 0x18, - 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x52, 0x03, 0x63, 0x70, 0x75, 0x2a, 0x39, 0x0a, 0x08, 0x46, 0x6c, 0x6f, 0x77, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x0c, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x54, - 0x59, 0x50, 0x45, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x4c, 0x33, 0x5f, 0x4c, 0x34, 0x10, 0x01, - 0x12, 0x06, 0x0a, 0x02, 0x4c, 0x37, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x53, 0x4f, 0x43, 0x4b, - 0x10, 0x03, 0x2a, 0x39, 0x0a, 0x08, 0x41, 0x75, 0x74, 0x68, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0c, - 0x0a, 0x08, 0x44, 0x49, 0x53, 0x41, 0x42, 0x4c, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, - 0x53, 0x50, 0x49, 0x52, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x45, 0x53, 0x54, 0x5f, - 0x41, 0x4c, 0x57, 0x41, 0x59, 0x53, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x10, 0x02, 0x2a, 0xea, 0x01, - 0x0a, 0x15, 0x54, 0x72, 0x61, 0x63, 0x65, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x11, 0x0a, 0x0d, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, - 0x57, 0x4e, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x54, 0x4f, - 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x54, 0x4f, 0x5f, 0x48, - 0x4f, 0x53, 0x54, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x54, 0x4f, 0x5f, 0x53, 0x54, 0x41, 0x43, - 0x4b, 0x10, 0x03, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x4f, 0x5f, 0x4f, 0x56, 0x45, 0x52, 0x4c, 0x41, - 0x59, 0x10, 0x04, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x4f, 0x5f, 0x45, 0x4e, 0x44, 0x50, 0x4f, 0x49, - 0x4e, 0x54, 0x10, 0x65, 0x12, 0x11, 0x0a, 0x0d, 0x46, 0x52, 0x4f, 0x4d, 0x5f, 0x45, 0x4e, 0x44, - 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10, 0x05, 0x12, 0x0e, 0x0a, 0x0a, 0x46, 0x52, 0x4f, 0x4d, 0x5f, - 0x50, 0x52, 0x4f, 0x58, 0x59, 0x10, 0x06, 0x12, 0x0d, 0x0a, 0x09, 0x46, 0x52, 0x4f, 0x4d, 0x5f, - 0x48, 0x4f, 0x53, 0x54, 0x10, 0x07, 0x12, 
0x0e, 0x0a, 0x0a, 0x46, 0x52, 0x4f, 0x4d, 0x5f, 0x53, - 0x54, 0x41, 0x43, 0x4b, 0x10, 0x08, 0x12, 0x10, 0x0a, 0x0c, 0x46, 0x52, 0x4f, 0x4d, 0x5f, 0x4f, - 0x56, 0x45, 0x52, 0x4c, 0x41, 0x59, 0x10, 0x09, 0x12, 0x10, 0x0a, 0x0c, 0x46, 0x52, 0x4f, 0x4d, - 0x5f, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x10, 0x0a, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x4f, - 0x5f, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x10, 0x0b, 0x2a, 0x48, 0x0a, 0x0a, 0x4c, 0x37, - 0x46, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x12, 0x13, 0x0a, 0x0f, 0x55, 0x4e, 0x4b, 0x4e, - 0x4f, 0x57, 0x4e, 0x5f, 0x4c, 0x37, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x10, 0x00, 0x12, 0x0b, 0x0a, - 0x07, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x45, - 0x53, 0x50, 0x4f, 0x4e, 0x53, 0x45, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x41, 0x4d, 0x50, - 0x4c, 0x45, 0x10, 0x03, 0x2a, 0x30, 0x0a, 0x09, 0x49, 0x50, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x0f, 0x0a, 0x0b, 0x49, 0x50, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x55, 0x53, 0x45, 0x44, - 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x50, 0x76, 0x34, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, - 0x49, 0x50, 0x76, 0x36, 0x10, 0x02, 0x2a, 0x7c, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x64, 0x69, 0x63, - 0x74, 0x12, 0x13, 0x0a, 0x0f, 0x56, 0x45, 0x52, 0x44, 0x49, 0x43, 0x54, 0x5f, 0x55, 0x4e, 0x4b, - 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x46, 0x4f, 0x52, 0x57, 0x41, 0x52, - 0x44, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x52, 0x4f, 0x50, 0x50, 0x45, 0x44, - 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x03, 0x12, 0x09, 0x0a, - 0x05, 0x41, 0x55, 0x44, 0x49, 0x54, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x52, 0x45, 0x44, 0x49, - 0x52, 0x45, 0x43, 0x54, 0x45, 0x44, 0x10, 0x05, 0x12, 0x0a, 0x0a, 0x06, 0x54, 0x52, 0x41, 0x43, - 0x45, 0x44, 0x10, 0x06, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x52, 0x41, 0x4e, 0x53, 0x4c, 0x41, 0x54, - 0x45, 0x44, 0x10, 0x07, 0x2a, 0xf0, 0x0f, 0x0a, 0x0a, 0x44, 0x72, 0x6f, 0x70, 0x52, 0x65, 0x61, - 0x73, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x13, 0x44, 0x52, 0x4f, 0x50, 0x5f, 0x52, 0x45, 0x41, 0x53, - 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x17, 0x0a, 0x12, - 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x4d, - 0x41, 0x43, 0x10, 0x82, 0x01, 0x12, 0x1c, 0x0a, 0x17, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, - 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x41, 0x43, - 0x10, 0x83, 0x01, 0x12, 0x16, 0x0a, 0x11, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x53, - 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x49, 0x50, 0x10, 0x84, 0x01, 0x12, 0x12, 0x0a, 0x0d, 0x50, - 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x85, 0x01, 0x12, - 0x1b, 0x0a, 0x16, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x50, 0x41, 0x43, 0x4b, 0x45, - 0x54, 0x5f, 0x44, 0x52, 0x4f, 0x50, 0x50, 0x45, 0x44, 0x10, 0x86, 0x01, 0x12, 0x23, 0x0a, 0x1e, - 0x43, 0x54, 0x5f, 0x54, 0x52, 0x55, 0x4e, 0x43, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x4f, 0x52, 0x5f, - 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x87, - 0x01, 0x12, 0x1c, 0x0a, 0x17, 0x43, 0x54, 0x5f, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4e, 0x47, 0x5f, - 0x54, 0x43, 0x50, 0x5f, 0x41, 0x43, 0x4b, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x88, 0x01, 0x12, - 0x1b, 0x0a, 0x16, 0x43, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x4c, 0x34, - 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x10, 0x89, 
0x01, 0x12, 0x27, 0x0a, 0x22, - 0x43, 0x54, 0x5f, 0x43, 0x41, 0x4e, 0x4e, 0x4f, 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, - 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x5f, 0x46, 0x52, 0x4f, 0x4d, 0x5f, 0x50, 0x41, 0x43, 0x4b, - 0x45, 0x54, 0x10, 0x8a, 0x01, 0x12, 0x1c, 0x0a, 0x17, 0x55, 0x4e, 0x53, 0x55, 0x50, 0x50, 0x4f, - 0x52, 0x54, 0x45, 0x44, 0x5f, 0x4c, 0x33, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, - 0x10, 0x8b, 0x01, 0x12, 0x15, 0x0a, 0x10, 0x4d, 0x49, 0x53, 0x53, 0x45, 0x44, 0x5f, 0x54, 0x41, - 0x49, 0x4c, 0x5f, 0x43, 0x41, 0x4c, 0x4c, 0x10, 0x8c, 0x01, 0x12, 0x1c, 0x0a, 0x17, 0x45, 0x52, - 0x52, 0x4f, 0x52, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x49, 0x4e, 0x47, 0x5f, 0x54, 0x4f, 0x5f, 0x50, - 0x41, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x8d, 0x01, 0x12, 0x18, 0x0a, 0x13, 0x55, 0x4e, 0x4b, 0x4e, - 0x4f, 0x57, 0x4e, 0x5f, 0x4c, 0x34, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x10, - 0x8e, 0x01, 0x12, 0x18, 0x0a, 0x13, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x49, 0x43, - 0x4d, 0x50, 0x56, 0x34, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x10, 0x8f, 0x01, 0x12, 0x18, 0x0a, 0x13, - 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x56, 0x34, 0x5f, 0x54, - 0x59, 0x50, 0x45, 0x10, 0x90, 0x01, 0x12, 0x18, 0x0a, 0x13, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, - 0x4e, 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x56, 0x36, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x10, 0x91, 0x01, - 0x12, 0x18, 0x0a, 0x13, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x49, 0x43, 0x4d, 0x50, - 0x56, 0x36, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x10, 0x92, 0x01, 0x12, 0x20, 0x0a, 0x1b, 0x45, 0x52, - 0x52, 0x4f, 0x52, 0x5f, 0x52, 0x45, 0x54, 0x52, 0x49, 0x45, 0x56, 0x49, 0x4e, 0x47, 0x5f, 0x54, - 0x55, 0x4e, 0x4e, 0x45, 0x4c, 0x5f, 0x4b, 0x45, 0x59, 0x10, 0x93, 0x01, 0x12, 0x24, 0x0a, 0x1f, - 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x52, 0x45, 0x54, 0x52, 0x49, 0x45, 0x56, 0x49, 0x4e, 0x47, - 0x5f, 0x54, 0x55, 0x4e, 0x4e, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x53, 0x10, - 0x94, 0x01, 0x12, 0x1a, 0x0a, 0x15, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x47, 0x45, - 0x4e, 0x45, 0x56, 0x45, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x95, 0x01, 0x12, 0x1e, - 0x0a, 0x19, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x4c, 0x33, 0x5f, 0x54, 0x41, 0x52, - 0x47, 0x45, 0x54, 0x5f, 0x41, 0x44, 0x44, 0x52, 0x45, 0x53, 0x53, 0x10, 0x96, 0x01, 0x12, 0x1b, - 0x0a, 0x16, 0x53, 0x54, 0x41, 0x4c, 0x45, 0x5f, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x52, 0x4f, 0x55, - 0x54, 0x41, 0x42, 0x4c, 0x45, 0x5f, 0x49, 0x50, 0x10, 0x97, 0x01, 0x12, 0x26, 0x0a, 0x21, 0x4e, - 0x4f, 0x5f, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x49, 0x4e, 0x47, 0x5f, 0x4c, 0x4f, 0x43, 0x41, 0x4c, - 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, 0x4e, 0x45, 0x52, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, - 0x10, 0x98, 0x01, 0x12, 0x27, 0x0a, 0x22, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x57, 0x48, 0x49, - 0x4c, 0x45, 0x5f, 0x43, 0x4f, 0x52, 0x52, 0x45, 0x43, 0x54, 0x49, 0x4e, 0x47, 0x5f, 0x4c, 0x33, - 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x53, 0x55, 0x4d, 0x10, 0x99, 0x01, 0x12, 0x27, 0x0a, 0x22, - 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x57, 0x48, 0x49, 0x4c, 0x45, 0x5f, 0x43, 0x4f, 0x52, 0x52, - 0x45, 0x43, 0x54, 0x49, 0x4e, 0x47, 0x5f, 0x4c, 0x34, 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x53, - 0x55, 0x4d, 0x10, 0x9a, 0x01, 0x12, 0x1c, 0x0a, 0x17, 0x43, 0x54, 0x5f, 0x4d, 0x41, 0x50, 0x5f, - 0x49, 0x4e, 0x53, 0x45, 0x52, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, - 0x10, 0x9b, 0x01, 0x12, 0x22, 0x0a, 0x1d, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 
0x49, - 0x50, 0x56, 0x36, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x48, 0x45, - 0x41, 0x44, 0x45, 0x52, 0x10, 0x9c, 0x01, 0x12, 0x23, 0x0a, 0x1e, 0x49, 0x50, 0x5f, 0x46, 0x52, - 0x41, 0x47, 0x4d, 0x45, 0x4e, 0x54, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, - 0x53, 0x55, 0x50, 0x50, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x10, 0x9d, 0x01, 0x12, 0x1e, 0x0a, 0x19, - 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, - 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x9e, 0x01, 0x12, 0x28, 0x0a, 0x23, - 0x4e, 0x4f, 0x5f, 0x54, 0x55, 0x4e, 0x4e, 0x45, 0x4c, 0x5f, 0x4f, 0x52, 0x5f, 0x45, 0x4e, 0x43, - 0x41, 0x50, 0x53, 0x55, 0x4c, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x45, 0x4e, 0x44, 0x50, 0x4f, - 0x49, 0x4e, 0x54, 0x10, 0xa0, 0x01, 0x12, 0x23, 0x0a, 0x1e, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, - 0x5f, 0x54, 0x4f, 0x5f, 0x49, 0x4e, 0x53, 0x45, 0x52, 0x54, 0x5f, 0x49, 0x4e, 0x54, 0x4f, 0x5f, - 0x50, 0x52, 0x4f, 0x58, 0x59, 0x4d, 0x41, 0x50, 0x10, 0xa1, 0x01, 0x12, 0x2b, 0x0a, 0x26, 0x52, - 0x45, 0x41, 0x43, 0x48, 0x45, 0x44, 0x5f, 0x45, 0x44, 0x54, 0x5f, 0x52, 0x41, 0x54, 0x45, 0x5f, - 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x49, 0x4e, 0x47, 0x5f, 0x44, 0x52, 0x4f, 0x50, 0x5f, 0x48, 0x4f, - 0x52, 0x49, 0x5a, 0x4f, 0x4e, 0x10, 0xa2, 0x01, 0x12, 0x26, 0x0a, 0x21, 0x55, 0x4e, 0x4b, 0x4e, - 0x4f, 0x57, 0x4e, 0x5f, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, - 0x52, 0x41, 0x43, 0x4b, 0x49, 0x4e, 0x47, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x10, 0xa3, 0x01, - 0x12, 0x1e, 0x0a, 0x19, 0x4c, 0x4f, 0x43, 0x41, 0x4c, 0x5f, 0x48, 0x4f, 0x53, 0x54, 0x5f, 0x49, - 0x53, 0x5f, 0x55, 0x4e, 0x52, 0x45, 0x41, 0x43, 0x48, 0x41, 0x42, 0x4c, 0x45, 0x10, 0xa4, 0x01, - 0x12, 0x3a, 0x0a, 0x35, 0x4e, 0x4f, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x55, 0x52, 0x41, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x41, 0x56, 0x41, 0x49, 0x4c, 0x41, 0x42, 0x4c, 0x45, 0x5f, 0x54, - 0x4f, 0x5f, 0x50, 0x45, 0x52, 0x46, 0x4f, 0x52, 0x4d, 0x5f, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, - 0x5f, 0x44, 0x45, 0x43, 0x49, 0x53, 0x49, 0x4f, 0x4e, 0x10, 0xa5, 0x01, 0x12, 0x1c, 0x0a, 0x17, - 0x55, 0x4e, 0x53, 0x55, 0x50, 0x50, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x5f, 0x4c, 0x32, 0x5f, 0x50, - 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x10, 0xa6, 0x01, 0x12, 0x22, 0x0a, 0x1d, 0x4e, 0x4f, - 0x5f, 0x4d, 0x41, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x5f, 0x46, 0x4f, 0x52, 0x5f, 0x4e, 0x41, 0x54, - 0x5f, 0x4d, 0x41, 0x53, 0x51, 0x55, 0x45, 0x52, 0x41, 0x44, 0x45, 0x10, 0xa7, 0x01, 0x12, 0x2c, - 0x0a, 0x27, 0x55, 0x4e, 0x53, 0x55, 0x50, 0x50, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x5f, 0x50, 0x52, - 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x46, 0x4f, 0x52, 0x5f, 0x4e, 0x41, 0x54, 0x5f, 0x4d, - 0x41, 0x53, 0x51, 0x55, 0x45, 0x52, 0x41, 0x44, 0x45, 0x10, 0xa8, 0x01, 0x12, 0x16, 0x0a, 0x11, - 0x46, 0x49, 0x42, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, - 0x44, 0x10, 0xa9, 0x01, 0x12, 0x28, 0x0a, 0x23, 0x45, 0x4e, 0x43, 0x41, 0x50, 0x53, 0x55, 0x4c, - 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x52, 0x41, 0x46, 0x46, 0x49, 0x43, 0x5f, 0x49, 0x53, - 0x5f, 0x50, 0x52, 0x4f, 0x48, 0x49, 0x42, 0x49, 0x54, 0x45, 0x44, 0x10, 0xaa, 0x01, 0x12, 0x15, - 0x0a, 0x10, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x49, 0x44, 0x45, 0x4e, 0x54, 0x49, - 0x54, 0x59, 0x10, 0xab, 0x01, 0x12, 0x13, 0x0a, 0x0e, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, - 0x5f, 0x53, 0x45, 0x4e, 0x44, 0x45, 0x52, 0x10, 0xac, 0x01, 0x12, 0x13, 0x0a, 0x0e, 0x4e, 0x41, - 0x54, 0x5f, 0x4e, 
0x4f, 0x54, 0x5f, 0x4e, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, 0xad, 0x01, 0x12, - 0x13, 0x0a, 0x0e, 0x49, 0x53, 0x5f, 0x41, 0x5f, 0x43, 0x4c, 0x55, 0x53, 0x54, 0x45, 0x52, 0x49, - 0x50, 0x10, 0xae, 0x01, 0x12, 0x2e, 0x0a, 0x29, 0x46, 0x49, 0x52, 0x53, 0x54, 0x5f, 0x4c, 0x4f, - 0x47, 0x49, 0x43, 0x41, 0x4c, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x47, 0x52, 0x41, 0x4d, 0x5f, 0x46, - 0x52, 0x41, 0x47, 0x4d, 0x45, 0x4e, 0x54, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, - 0x44, 0x10, 0xaf, 0x01, 0x12, 0x1d, 0x0a, 0x18, 0x46, 0x4f, 0x52, 0x42, 0x49, 0x44, 0x44, 0x45, - 0x4e, 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x56, 0x36, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, - 0x10, 0xb0, 0x01, 0x12, 0x21, 0x0a, 0x1c, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x5f, 0x42, 0x59, - 0x5f, 0x4c, 0x42, 0x5f, 0x53, 0x52, 0x43, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x5f, 0x43, 0x48, - 0x45, 0x43, 0x4b, 0x10, 0xb1, 0x01, 0x12, 0x19, 0x0a, 0x14, 0x53, 0x4f, 0x43, 0x4b, 0x45, 0x54, - 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0xb2, - 0x01, 0x12, 0x19, 0x0a, 0x14, 0x53, 0x4f, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x41, 0x53, 0x53, 0x49, - 0x47, 0x4e, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0xb3, 0x01, 0x12, 0x31, 0x0a, 0x2c, - 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x52, 0x45, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x49, 0x4f, - 0x4e, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x53, 0x55, 0x50, 0x50, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x5f, - 0x46, 0x4f, 0x52, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x10, 0xb4, 0x01, 0x12, - 0x10, 0x0a, 0x0b, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x44, 0x45, 0x4e, 0x59, 0x10, 0xb5, - 0x01, 0x12, 0x12, 0x0a, 0x0d, 0x56, 0x4c, 0x41, 0x4e, 0x5f, 0x46, 0x49, 0x4c, 0x54, 0x45, 0x52, - 0x45, 0x44, 0x10, 0xb6, 0x01, 0x12, 0x10, 0x0a, 0x0b, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, - 0x5f, 0x56, 0x4e, 0x49, 0x10, 0xb7, 0x01, 0x12, 0x16, 0x0a, 0x11, 0x49, 0x4e, 0x56, 0x41, 0x4c, - 0x49, 0x44, 0x5f, 0x54, 0x43, 0x5f, 0x42, 0x55, 0x46, 0x46, 0x45, 0x52, 0x10, 0xb8, 0x01, 0x12, - 0x0b, 0x0a, 0x06, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x10, 0xb9, 0x01, 0x12, 0x17, 0x0a, 0x12, - 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4e, 0x47, 0x5f, 0x53, 0x52, 0x56, 0x36, 0x5f, 0x53, 0x54, 0x41, - 0x54, 0x45, 0x10, 0xba, 0x01, 0x12, 0x0a, 0x0a, 0x05, 0x4e, 0x41, 0x54, 0x34, 0x36, 0x10, 0xbb, - 0x01, 0x12, 0x0a, 0x0a, 0x05, 0x4e, 0x41, 0x54, 0x36, 0x34, 0x10, 0xbc, 0x01, 0x12, 0x12, 0x0a, - 0x0d, 0x41, 0x55, 0x54, 0x48, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0xbd, - 0x01, 0x12, 0x14, 0x0a, 0x0f, 0x43, 0x54, 0x5f, 0x4e, 0x4f, 0x5f, 0x4d, 0x41, 0x50, 0x5f, 0x46, - 0x4f, 0x55, 0x4e, 0x44, 0x10, 0xbe, 0x01, 0x12, 0x16, 0x0a, 0x11, 0x53, 0x4e, 0x41, 0x54, 0x5f, - 0x4e, 0x4f, 0x5f, 0x4d, 0x41, 0x50, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0xbf, 0x01, 0x12, - 0x17, 0x0a, 0x12, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x43, 0x4c, 0x55, 0x53, 0x54, - 0x45, 0x52, 0x5f, 0x49, 0x44, 0x10, 0xc0, 0x01, 0x12, 0x27, 0x0a, 0x22, 0x55, 0x4e, 0x53, 0x55, - 0x50, 0x50, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, - 0x5f, 0x46, 0x4f, 0x52, 0x5f, 0x44, 0x53, 0x52, 0x5f, 0x45, 0x4e, 0x43, 0x41, 0x50, 0x10, 0xc1, - 0x01, 0x12, 0x16, 0x0a, 0x11, 0x4e, 0x4f, 0x5f, 0x45, 0x47, 0x52, 0x45, 0x53, 0x53, 0x5f, 0x47, - 0x41, 0x54, 0x45, 0x57, 0x41, 0x59, 0x10, 0xc2, 0x01, 0x12, 0x18, 0x0a, 0x13, 0x55, 0x4e, 0x45, - 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x45, 0x44, 0x5f, 0x54, 0x52, 0x41, 0x46, 0x46, 0x49, 0x43, - 0x10, 0xc3, 0x01, 0x12, 0x11, 0x0a, 0x0c, 
0x54, 0x54, 0x4c, 0x5f, 0x45, 0x58, 0x43, 0x45, 0x45, - 0x44, 0x45, 0x44, 0x10, 0xc4, 0x01, 0x12, 0x0f, 0x0a, 0x0a, 0x4e, 0x4f, 0x5f, 0x4e, 0x4f, 0x44, - 0x45, 0x5f, 0x49, 0x44, 0x10, 0xc5, 0x01, 0x2a, 0x4a, 0x0a, 0x10, 0x54, 0x72, 0x61, 0x66, 0x66, - 0x69, 0x63, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x19, 0x54, - 0x52, 0x41, 0x46, 0x46, 0x49, 0x43, 0x5f, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, - 0x47, 0x52, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x45, 0x47, 0x52, 0x45, 0x53, - 0x53, 0x10, 0x02, 0x2a, 0x8d, 0x02, 0x0a, 0x11, 0x44, 0x65, 0x62, 0x75, 0x67, 0x43, 0x61, 0x70, - 0x74, 0x75, 0x72, 0x65, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x19, 0x44, 0x42, 0x47, - 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x55, - 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14, 0x44, 0x42, 0x47, 0x5f, - 0x43, 0x41, 0x50, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x44, 0x45, 0x4c, 0x49, 0x56, 0x45, 0x52, 0x59, - 0x10, 0x04, 0x12, 0x17, 0x0a, 0x13, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, 0x52, - 0x45, 0x5f, 0x46, 0x52, 0x4f, 0x4d, 0x5f, 0x4c, 0x42, 0x10, 0x05, 0x12, 0x19, 0x0a, 0x15, 0x44, - 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x41, 0x46, 0x54, 0x45, 0x52, - 0x5f, 0x56, 0x34, 0x36, 0x10, 0x06, 0x12, 0x19, 0x0a, 0x15, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, - 0x50, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x41, 0x46, 0x54, 0x45, 0x52, 0x5f, 0x56, 0x36, 0x34, 0x10, - 0x07, 0x12, 0x19, 0x0a, 0x15, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, 0x52, 0x45, - 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x50, 0x52, 0x45, 0x10, 0x08, 0x12, 0x1a, 0x0a, 0x16, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x70, 0x6f, 0x64, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x6f, 0x64, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, + 0x99, 0x02, 0x0a, 0x13, 0x49, 0x50, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x69, 0x64, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x69, 0x64, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x69, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x69, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x3f, 0x0a, 0x0c, 0x6f, 0x6c, 0x64, 0x5f, 0x69, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x6f, 0x6c, 0x64, + 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x17, 0x0a, 0x07, 0x68, 0x6f, 0x73, 0x74, + 0x5f, 0x69, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x68, 0x6f, 0x73, 0x74, 0x49, + 0x70, 0x12, 0x1e, 0x0a, 0x0b, 0x6f, 0x6c, 0x64, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x69, 0x70, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6f, 0x6c, 0x64, 0x48, 0x6f, 0x73, 0x74, 0x49, + 0x70, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x5f, 0x6b, 0x65, 0x79, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x65, 0x6e, 0x63, 
0x72, 0x79, 0x70, 0x74, 0x4b, + 0x65, 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x12, 0x19, 0x0a, 0x08, 0x70, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x70, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x43, 0x0a, 0x1d, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x72, 0x12, 0x0e, 0x0a, 0x02, + 0x69, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x70, 0x12, 0x12, 0x0a, 0x04, + 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, + 0x22, 0x9a, 0x03, 0x0a, 0x19, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x73, 0x65, + 0x72, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, + 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x02, 0x69, 0x64, 0x12, 0x4e, + 0x0a, 0x10, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x4e, 0x6f, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x72, 0x52, 0x0f, 0x66, + 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x50, + 0x0a, 0x11, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x66, 0x6c, 0x6f, 0x77, + 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x4e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x72, 0x52, 0x10, + 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, + 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, + 0x52, 0x0d, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x65, 0x78, 0x74, 0x5f, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, + 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x65, + 0x78, 0x74, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, + 0x2c, 0x0a, 0x12, 0x69, 0x6e, 0x74, 0x5f, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x69, 0x6e, 0x74, + 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x2b, 0x0a, + 0x19, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 
0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x02, 0x69, 0x64, 0x22, 0x3c, 0x0a, 0x10, 0x4e, 0x65, + 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x12, 0x14, + 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x69, + 0x6e, 0x64, 0x65, 0x78, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xef, 0x02, 0x0a, 0x0a, 0x44, 0x65, 0x62, + 0x75, 0x67, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x65, 0x62, + 0x75, 0x67, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x26, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x0e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x30, 0x0a, 0x04, 0x68, 0x61, 0x73, + 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x12, 0x30, 0x0a, 0x04, 0x61, + 0x72, 0x67, 0x31, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, + 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04, 0x61, 0x72, 0x67, 0x31, 0x12, 0x30, 0x0a, + 0x04, 0x61, 0x72, 0x67, 0x32, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, + 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04, 0x61, 0x72, 0x67, 0x32, 0x12, + 0x30, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x33, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04, 0x61, 0x72, 0x67, + 0x33, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2d, 0x0a, 0x03, 0x63, + 0x70, 0x75, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x33, 0x32, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x03, 0x63, 0x70, 0x75, 0x2a, 0x39, 0x0a, 0x08, 0x46, 0x6c, + 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x0c, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, + 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x4c, 0x33, 0x5f, 0x4c, + 0x34, 0x10, 0x01, 0x12, 0x06, 0x0a, 0x02, 0x4c, 0x37, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x53, + 0x4f, 0x43, 0x4b, 0x10, 0x03, 0x2a, 0x39, 0x0a, 0x08, 0x41, 0x75, 0x74, 0x68, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x49, 0x53, 0x41, 0x42, 0x4c, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x09, 0x0a, 0x05, 0x53, 0x50, 0x49, 0x52, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x45, + 0x53, 0x54, 0x5f, 0x41, 0x4c, 0x57, 0x41, 0x59, 0x53, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x10, 0x02, + 0x2a, 0xea, 0x01, 0x0a, 0x15, 0x54, 0x72, 0x61, 0x63, 0x65, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, + 0x61, 0x74, 0x69, 
0x6f, 0x6e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x11, 0x0a, 0x0d, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10, 0x00, 0x12, 0x0c, 0x0a, + 0x08, 0x54, 0x4f, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x54, + 0x4f, 0x5f, 0x48, 0x4f, 0x53, 0x54, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x54, 0x4f, 0x5f, 0x53, + 0x54, 0x41, 0x43, 0x4b, 0x10, 0x03, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x4f, 0x5f, 0x4f, 0x56, 0x45, + 0x52, 0x4c, 0x41, 0x59, 0x10, 0x04, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x4f, 0x5f, 0x45, 0x4e, 0x44, + 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10, 0x65, 0x12, 0x11, 0x0a, 0x0d, 0x46, 0x52, 0x4f, 0x4d, 0x5f, + 0x45, 0x4e, 0x44, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10, 0x05, 0x12, 0x0e, 0x0a, 0x0a, 0x46, 0x52, + 0x4f, 0x4d, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x10, 0x06, 0x12, 0x0d, 0x0a, 0x09, 0x46, 0x52, + 0x4f, 0x4d, 0x5f, 0x48, 0x4f, 0x53, 0x54, 0x10, 0x07, 0x12, 0x0e, 0x0a, 0x0a, 0x46, 0x52, 0x4f, + 0x4d, 0x5f, 0x53, 0x54, 0x41, 0x43, 0x4b, 0x10, 0x08, 0x12, 0x10, 0x0a, 0x0c, 0x46, 0x52, 0x4f, + 0x4d, 0x5f, 0x4f, 0x56, 0x45, 0x52, 0x4c, 0x41, 0x59, 0x10, 0x09, 0x12, 0x10, 0x0a, 0x0c, 0x46, + 0x52, 0x4f, 0x4d, 0x5f, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x10, 0x0a, 0x12, 0x0e, 0x0a, + 0x0a, 0x54, 0x4f, 0x5f, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x10, 0x0b, 0x2a, 0x48, 0x0a, + 0x0a, 0x4c, 0x37, 0x46, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x12, 0x13, 0x0a, 0x0f, 0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x4c, 0x37, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x10, 0x00, + 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, + 0x08, 0x52, 0x45, 0x53, 0x50, 0x4f, 0x4e, 0x53, 0x45, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x53, + 0x41, 0x4d, 0x50, 0x4c, 0x45, 0x10, 0x03, 0x2a, 0x30, 0x0a, 0x09, 0x49, 0x50, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x0a, 0x0b, 0x49, 0x50, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x55, + 0x53, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x50, 0x76, 0x34, 0x10, 0x01, 0x12, + 0x08, 0x0a, 0x04, 0x49, 0x50, 0x76, 0x36, 0x10, 0x02, 0x2a, 0x7c, 0x0a, 0x07, 0x56, 0x65, 0x72, + 0x64, 0x69, 0x63, 0x74, 0x12, 0x13, 0x0a, 0x0f, 0x56, 0x45, 0x52, 0x44, 0x49, 0x43, 0x54, 0x5f, + 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x46, 0x4f, 0x52, + 0x57, 0x41, 0x52, 0x44, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x52, 0x4f, 0x50, + 0x50, 0x45, 0x44, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x03, + 0x12, 0x09, 0x0a, 0x05, 0x41, 0x55, 0x44, 0x49, 0x54, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x52, + 0x45, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x45, 0x44, 0x10, 0x05, 0x12, 0x0a, 0x0a, 0x06, 0x54, + 0x52, 0x41, 0x43, 0x45, 0x44, 0x10, 0x06, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x52, 0x41, 0x4e, 0x53, + 0x4c, 0x41, 0x54, 0x45, 0x44, 0x10, 0x07, 0x2a, 0x88, 0x10, 0x0a, 0x0a, 0x44, 0x72, 0x6f, 0x70, + 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x13, 0x44, 0x52, 0x4f, 0x50, 0x5f, 0x52, + 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x17, 0x0a, 0x12, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, + 0x45, 0x5f, 0x4d, 0x41, 0x43, 0x10, 0x82, 0x01, 0x12, 0x1c, 0x0a, 0x17, 0x49, 0x4e, 0x56, 0x41, + 0x4c, 0x49, 0x44, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x4d, 0x41, 0x43, 0x10, 0x83, 0x01, 0x12, 0x16, 0x0a, 0x11, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, + 0x44, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 
0x45, 0x5f, 0x49, 0x50, 0x10, 0x84, 0x01, 0x12, 0x12, + 0x0a, 0x0d, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, + 0x85, 0x01, 0x12, 0x1b, 0x0a, 0x16, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x50, 0x41, + 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x44, 0x52, 0x4f, 0x50, 0x50, 0x45, 0x44, 0x10, 0x86, 0x01, 0x12, + 0x23, 0x0a, 0x1e, 0x43, 0x54, 0x5f, 0x54, 0x52, 0x55, 0x4e, 0x43, 0x41, 0x54, 0x45, 0x44, 0x5f, + 0x4f, 0x52, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, + 0x52, 0x10, 0x87, 0x01, 0x12, 0x1c, 0x0a, 0x17, 0x43, 0x54, 0x5f, 0x4d, 0x49, 0x53, 0x53, 0x49, + 0x4e, 0x47, 0x5f, 0x54, 0x43, 0x50, 0x5f, 0x41, 0x43, 0x4b, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, + 0x88, 0x01, 0x12, 0x1b, 0x0a, 0x16, 0x43, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, + 0x5f, 0x4c, 0x34, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x10, 0x89, 0x01, 0x12, + 0x27, 0x0a, 0x22, 0x43, 0x54, 0x5f, 0x43, 0x41, 0x4e, 0x4e, 0x4f, 0x54, 0x5f, 0x43, 0x52, 0x45, + 0x41, 0x54, 0x45, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x5f, 0x46, 0x52, 0x4f, 0x4d, 0x5f, 0x50, + 0x41, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x8a, 0x01, 0x12, 0x1c, 0x0a, 0x17, 0x55, 0x4e, 0x53, 0x55, + 0x50, 0x50, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x5f, 0x4c, 0x33, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, + 0x43, 0x4f, 0x4c, 0x10, 0x8b, 0x01, 0x12, 0x15, 0x0a, 0x10, 0x4d, 0x49, 0x53, 0x53, 0x45, 0x44, + 0x5f, 0x54, 0x41, 0x49, 0x4c, 0x5f, 0x43, 0x41, 0x4c, 0x4c, 0x10, 0x8c, 0x01, 0x12, 0x1c, 0x0a, + 0x17, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x49, 0x4e, 0x47, 0x5f, 0x54, + 0x4f, 0x5f, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x8d, 0x01, 0x12, 0x18, 0x0a, 0x13, 0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x4c, 0x34, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, + 0x4f, 0x4c, 0x10, 0x8e, 0x01, 0x12, 0x18, 0x0a, 0x13, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, + 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x56, 0x34, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x10, 0x8f, 0x01, 0x12, + 0x18, 0x0a, 0x13, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x56, + 0x34, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x10, 0x90, 0x01, 0x12, 0x18, 0x0a, 0x13, 0x55, 0x4e, 0x4b, + 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x56, 0x36, 0x5f, 0x43, 0x4f, 0x44, 0x45, + 0x10, 0x91, 0x01, 0x12, 0x18, 0x0a, 0x13, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x49, + 0x43, 0x4d, 0x50, 0x56, 0x36, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x10, 0x92, 0x01, 0x12, 0x20, 0x0a, + 0x1b, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x52, 0x45, 0x54, 0x52, 0x49, 0x45, 0x56, 0x49, 0x4e, + 0x47, 0x5f, 0x54, 0x55, 0x4e, 0x4e, 0x45, 0x4c, 0x5f, 0x4b, 0x45, 0x59, 0x10, 0x93, 0x01, 0x12, + 0x24, 0x0a, 0x1f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x52, 0x45, 0x54, 0x52, 0x49, 0x45, 0x56, + 0x49, 0x4e, 0x47, 0x5f, 0x54, 0x55, 0x4e, 0x4e, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, + 0x4e, 0x53, 0x10, 0x94, 0x01, 0x12, 0x1a, 0x0a, 0x15, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, + 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x56, 0x45, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x95, + 0x01, 0x12, 0x1e, 0x0a, 0x19, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x4c, 0x33, 0x5f, + 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x41, 0x44, 0x44, 0x52, 0x45, 0x53, 0x53, 0x10, 0x96, + 0x01, 0x12, 0x1b, 0x0a, 0x16, 0x53, 0x54, 0x41, 0x4c, 0x45, 0x5f, 0x4f, 0x52, 0x5f, 0x55, 0x4e, + 0x52, 0x4f, 0x55, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x5f, 0x49, 0x50, 0x10, 0x97, 0x01, 0x12, 0x26, + 0x0a, 0x21, 0x4e, 0x4f, 0x5f, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x49, 
0x4e, 0x47, 0x5f, 0x4c, 0x4f, + 0x43, 0x41, 0x4c, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, 0x4e, 0x45, 0x52, 0x5f, 0x46, 0x4f, + 0x55, 0x4e, 0x44, 0x10, 0x98, 0x01, 0x12, 0x27, 0x0a, 0x22, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, + 0x57, 0x48, 0x49, 0x4c, 0x45, 0x5f, 0x43, 0x4f, 0x52, 0x52, 0x45, 0x43, 0x54, 0x49, 0x4e, 0x47, + 0x5f, 0x4c, 0x33, 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x53, 0x55, 0x4d, 0x10, 0x99, 0x01, 0x12, + 0x27, 0x0a, 0x22, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x57, 0x48, 0x49, 0x4c, 0x45, 0x5f, 0x43, + 0x4f, 0x52, 0x52, 0x45, 0x43, 0x54, 0x49, 0x4e, 0x47, 0x5f, 0x4c, 0x34, 0x5f, 0x43, 0x48, 0x45, + 0x43, 0x4b, 0x53, 0x55, 0x4d, 0x10, 0x9a, 0x01, 0x12, 0x1c, 0x0a, 0x17, 0x43, 0x54, 0x5f, 0x4d, + 0x41, 0x50, 0x5f, 0x49, 0x4e, 0x53, 0x45, 0x52, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x41, 0x49, + 0x4c, 0x45, 0x44, 0x10, 0x9b, 0x01, 0x12, 0x22, 0x0a, 0x1d, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, + 0x44, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, + 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x9c, 0x01, 0x12, 0x23, 0x0a, 0x1e, 0x49, 0x50, + 0x5f, 0x46, 0x52, 0x41, 0x47, 0x4d, 0x45, 0x4e, 0x54, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4e, + 0x4f, 0x54, 0x5f, 0x53, 0x55, 0x50, 0x50, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x10, 0x9d, 0x01, 0x12, + 0x1e, 0x0a, 0x19, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, + 0x4e, 0x44, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x9e, 0x01, 0x12, + 0x28, 0x0a, 0x23, 0x4e, 0x4f, 0x5f, 0x54, 0x55, 0x4e, 0x4e, 0x45, 0x4c, 0x5f, 0x4f, 0x52, 0x5f, + 0x45, 0x4e, 0x43, 0x41, 0x50, 0x53, 0x55, 0x4c, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x45, 0x4e, + 0x44, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10, 0xa0, 0x01, 0x12, 0x23, 0x0a, 0x1e, 0x46, 0x41, 0x49, + 0x4c, 0x45, 0x44, 0x5f, 0x54, 0x4f, 0x5f, 0x49, 0x4e, 0x53, 0x45, 0x52, 0x54, 0x5f, 0x49, 0x4e, + 0x54, 0x4f, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x4d, 0x41, 0x50, 0x10, 0xa1, 0x01, 0x12, 0x2b, + 0x0a, 0x26, 0x52, 0x45, 0x41, 0x43, 0x48, 0x45, 0x44, 0x5f, 0x45, 0x44, 0x54, 0x5f, 0x52, 0x41, + 0x54, 0x45, 0x5f, 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x49, 0x4e, 0x47, 0x5f, 0x44, 0x52, 0x4f, 0x50, + 0x5f, 0x48, 0x4f, 0x52, 0x49, 0x5a, 0x4f, 0x4e, 0x10, 0xa2, 0x01, 0x12, 0x26, 0x0a, 0x21, 0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, 0x4f, + 0x4e, 0x5f, 0x54, 0x52, 0x41, 0x43, 0x4b, 0x49, 0x4e, 0x47, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, + 0x10, 0xa3, 0x01, 0x12, 0x1e, 0x0a, 0x19, 0x4c, 0x4f, 0x43, 0x41, 0x4c, 0x5f, 0x48, 0x4f, 0x53, + 0x54, 0x5f, 0x49, 0x53, 0x5f, 0x55, 0x4e, 0x52, 0x45, 0x41, 0x43, 0x48, 0x41, 0x42, 0x4c, 0x45, + 0x10, 0xa4, 0x01, 0x12, 0x3a, 0x0a, 0x35, 0x4e, 0x4f, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, + 0x55, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x41, 0x56, 0x41, 0x49, 0x4c, 0x41, 0x42, 0x4c, + 0x45, 0x5f, 0x54, 0x4f, 0x5f, 0x50, 0x45, 0x52, 0x46, 0x4f, 0x52, 0x4d, 0x5f, 0x50, 0x4f, 0x4c, + 0x49, 0x43, 0x59, 0x5f, 0x44, 0x45, 0x43, 0x49, 0x53, 0x49, 0x4f, 0x4e, 0x10, 0xa5, 0x01, 0x12, + 0x1c, 0x0a, 0x17, 0x55, 0x4e, 0x53, 0x55, 0x50, 0x50, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x5f, 0x4c, + 0x32, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x10, 0xa6, 0x01, 0x12, 0x22, 0x0a, + 0x1d, 0x4e, 0x4f, 0x5f, 0x4d, 0x41, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x5f, 0x46, 0x4f, 0x52, 0x5f, + 0x4e, 0x41, 0x54, 0x5f, 0x4d, 0x41, 0x53, 0x51, 0x55, 0x45, 0x52, 0x41, 0x44, 0x45, 0x10, 0xa7, + 0x01, 0x12, 0x2c, 0x0a, 0x27, 0x55, 0x4e, 0x53, 0x55, 0x50, 0x50, 0x4f, 0x52, 0x54, 0x45, 
0x44, + 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x46, 0x4f, 0x52, 0x5f, 0x4e, 0x41, + 0x54, 0x5f, 0x4d, 0x41, 0x53, 0x51, 0x55, 0x45, 0x52, 0x41, 0x44, 0x45, 0x10, 0xa8, 0x01, 0x12, + 0x16, 0x0a, 0x11, 0x46, 0x49, 0x42, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x46, 0x41, + 0x49, 0x4c, 0x45, 0x44, 0x10, 0xa9, 0x01, 0x12, 0x28, 0x0a, 0x23, 0x45, 0x4e, 0x43, 0x41, 0x50, + 0x53, 0x55, 0x4c, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x52, 0x41, 0x46, 0x46, 0x49, 0x43, + 0x5f, 0x49, 0x53, 0x5f, 0x50, 0x52, 0x4f, 0x48, 0x49, 0x42, 0x49, 0x54, 0x45, 0x44, 0x10, 0xaa, + 0x01, 0x12, 0x15, 0x0a, 0x10, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x49, 0x44, 0x45, + 0x4e, 0x54, 0x49, 0x54, 0x59, 0x10, 0xab, 0x01, 0x12, 0x13, 0x0a, 0x0e, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x5f, 0x53, 0x45, 0x4e, 0x44, 0x45, 0x52, 0x10, 0xac, 0x01, 0x12, 0x13, 0x0a, + 0x0e, 0x4e, 0x41, 0x54, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x4e, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, + 0xad, 0x01, 0x12, 0x13, 0x0a, 0x0e, 0x49, 0x53, 0x5f, 0x41, 0x5f, 0x43, 0x4c, 0x55, 0x53, 0x54, + 0x45, 0x52, 0x49, 0x50, 0x10, 0xae, 0x01, 0x12, 0x2e, 0x0a, 0x29, 0x46, 0x49, 0x52, 0x53, 0x54, + 0x5f, 0x4c, 0x4f, 0x47, 0x49, 0x43, 0x41, 0x4c, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x47, 0x52, 0x41, + 0x4d, 0x5f, 0x46, 0x52, 0x41, 0x47, 0x4d, 0x45, 0x4e, 0x54, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, + 0x4f, 0x55, 0x4e, 0x44, 0x10, 0xaf, 0x01, 0x12, 0x1d, 0x0a, 0x18, 0x46, 0x4f, 0x52, 0x42, 0x49, + 0x44, 0x44, 0x45, 0x4e, 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x56, 0x36, 0x5f, 0x4d, 0x45, 0x53, 0x53, + 0x41, 0x47, 0x45, 0x10, 0xb0, 0x01, 0x12, 0x21, 0x0a, 0x1c, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, + 0x5f, 0x42, 0x59, 0x5f, 0x4c, 0x42, 0x5f, 0x53, 0x52, 0x43, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, + 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x10, 0xb1, 0x01, 0x12, 0x19, 0x0a, 0x14, 0x53, 0x4f, 0x43, + 0x4b, 0x45, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, + 0x44, 0x10, 0xb2, 0x01, 0x12, 0x19, 0x0a, 0x14, 0x53, 0x4f, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x41, + 0x53, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0xb3, 0x01, 0x12, + 0x31, 0x0a, 0x2c, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x52, 0x45, 0x44, 0x49, 0x52, 0x45, 0x43, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x53, 0x55, 0x50, 0x50, 0x4f, 0x52, 0x54, + 0x45, 0x44, 0x5f, 0x46, 0x4f, 0x52, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x10, + 0xb4, 0x01, 0x12, 0x10, 0x0a, 0x0b, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x44, 0x45, 0x4e, + 0x59, 0x10, 0xb5, 0x01, 0x12, 0x12, 0x0a, 0x0d, 0x56, 0x4c, 0x41, 0x4e, 0x5f, 0x46, 0x49, 0x4c, + 0x54, 0x45, 0x52, 0x45, 0x44, 0x10, 0xb6, 0x01, 0x12, 0x10, 0x0a, 0x0b, 0x49, 0x4e, 0x56, 0x41, + 0x4c, 0x49, 0x44, 0x5f, 0x56, 0x4e, 0x49, 0x10, 0xb7, 0x01, 0x12, 0x16, 0x0a, 0x11, 0x49, 0x4e, + 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x54, 0x43, 0x5f, 0x42, 0x55, 0x46, 0x46, 0x45, 0x52, 0x10, + 0xb8, 0x01, 0x12, 0x0b, 0x0a, 0x06, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x10, 0xb9, 0x01, 0x12, + 0x17, 0x0a, 0x12, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4e, 0x47, 0x5f, 0x53, 0x52, 0x56, 0x36, 0x5f, + 0x53, 0x54, 0x41, 0x54, 0x45, 0x10, 0xba, 0x01, 0x12, 0x0a, 0x0a, 0x05, 0x4e, 0x41, 0x54, 0x34, + 0x36, 0x10, 0xbb, 0x01, 0x12, 0x0a, 0x0a, 0x05, 0x4e, 0x41, 0x54, 0x36, 0x34, 0x10, 0xbc, 0x01, + 0x12, 0x12, 0x0a, 0x0d, 0x41, 0x55, 0x54, 0x48, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, + 0x44, 0x10, 0xbd, 0x01, 0x12, 0x14, 0x0a, 0x0f, 0x43, 0x54, 0x5f, 0x4e, 0x4f, 0x5f, 0x4d, 0x41, + 0x50, 0x5f, 0x46, 
0x4f, 0x55, 0x4e, 0x44, 0x10, 0xbe, 0x01, 0x12, 0x16, 0x0a, 0x11, 0x53, 0x4e, + 0x41, 0x54, 0x5f, 0x4e, 0x4f, 0x5f, 0x4d, 0x41, 0x50, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, + 0xbf, 0x01, 0x12, 0x17, 0x0a, 0x12, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x43, 0x4c, + 0x55, 0x53, 0x54, 0x45, 0x52, 0x5f, 0x49, 0x44, 0x10, 0xc0, 0x01, 0x12, 0x27, 0x0a, 0x22, 0x55, + 0x4e, 0x53, 0x55, 0x50, 0x50, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, + 0x43, 0x4f, 0x4c, 0x5f, 0x46, 0x4f, 0x52, 0x5f, 0x44, 0x53, 0x52, 0x5f, 0x45, 0x4e, 0x43, 0x41, + 0x50, 0x10, 0xc1, 0x01, 0x12, 0x16, 0x0a, 0x11, 0x4e, 0x4f, 0x5f, 0x45, 0x47, 0x52, 0x45, 0x53, + 0x53, 0x5f, 0x47, 0x41, 0x54, 0x45, 0x57, 0x41, 0x59, 0x10, 0xc2, 0x01, 0x12, 0x18, 0x0a, 0x13, + 0x55, 0x4e, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x45, 0x44, 0x5f, 0x54, 0x52, 0x41, 0x46, + 0x46, 0x49, 0x43, 0x10, 0xc3, 0x01, 0x12, 0x11, 0x0a, 0x0c, 0x54, 0x54, 0x4c, 0x5f, 0x45, 0x58, + 0x43, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, 0xc4, 0x01, 0x12, 0x0f, 0x0a, 0x0a, 0x4e, 0x4f, 0x5f, + 0x4e, 0x4f, 0x44, 0x45, 0x5f, 0x49, 0x44, 0x10, 0xc5, 0x01, 0x12, 0x16, 0x0a, 0x11, 0x44, 0x52, + 0x4f, 0x50, 0x5f, 0x52, 0x41, 0x54, 0x45, 0x5f, 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, + 0xc6, 0x01, 0x2a, 0x4a, 0x0a, 0x10, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x44, 0x69, 0x72, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x19, 0x54, 0x52, 0x41, 0x46, 0x46, 0x49, + 0x43, 0x5f, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x47, 0x52, 0x45, 0x53, 0x53, + 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x45, 0x47, 0x52, 0x45, 0x53, 0x53, 0x10, 0x02, 0x2a, 0x8d, + 0x02, 0x0a, 0x11, 0x44, 0x65, 0x62, 0x75, 0x67, 0x43, 0x61, 0x70, 0x74, 0x75, 0x72, 0x65, 0x50, + 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x19, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, + 0x55, 0x52, 0x45, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, + 0x4e, 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, + 0x52, 0x45, 0x5f, 0x44, 0x45, 0x4c, 0x49, 0x56, 0x45, 0x52, 0x59, 0x10, 0x04, 0x12, 0x17, 0x0a, + 0x13, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x46, 0x52, 0x4f, + 0x4d, 0x5f, 0x4c, 0x42, 0x10, 0x05, 0x12, 0x19, 0x0a, 0x15, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, + 0x50, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x41, 0x46, 0x54, 0x45, 0x52, 0x5f, 0x56, 0x34, 0x36, 0x10, + 0x06, 0x12, 0x19, 0x0a, 0x15, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, 0x52, 0x45, + 0x5f, 0x41, 0x46, 0x54, 0x45, 0x52, 0x5f, 0x56, 0x36, 0x34, 0x10, 0x07, 0x12, 0x19, 0x0a, 0x15, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x50, 0x52, 0x4f, 0x58, - 0x59, 0x5f, 0x50, 0x4f, 0x53, 0x54, 0x10, 0x09, 0x12, 0x18, 0x0a, 0x14, 0x44, 0x42, 0x47, 0x5f, - 0x43, 0x41, 0x50, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x53, 0x4e, 0x41, 0x54, 0x5f, 0x50, 0x52, 0x45, - 0x10, 0x0a, 0x12, 0x19, 0x0a, 0x15, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, 0x52, - 0x45, 0x5f, 0x53, 0x4e, 0x41, 0x54, 0x5f, 0x50, 0x4f, 0x53, 0x54, 0x10, 0x0b, 0x22, 0x04, 0x08, - 0x01, 0x10, 0x03, 0x2a, 0x39, 0x0a, 0x09, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0f, 0x0a, - 0x0b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x10, 0x09, 0x12, 0x0e, - 0x0a, 0x0a, 0x52, 0x65, 0x63, 0x6f, 0x72, 
0x64, 0x4c, 0x6f, 0x73, 0x74, 0x10, 0x02, 0x2a, 0x7f, - 0x0a, 0x0f, 0x4c, 0x6f, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x12, 0x1d, 0x0a, 0x19, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x4c, 0x4f, 0x53, - 0x54, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x00, - 0x12, 0x1a, 0x0a, 0x16, 0x50, 0x45, 0x52, 0x46, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x52, - 0x49, 0x4e, 0x47, 0x5f, 0x42, 0x55, 0x46, 0x46, 0x45, 0x52, 0x10, 0x01, 0x12, 0x19, 0x0a, 0x15, - 0x4f, 0x42, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x53, 0x5f, - 0x51, 0x55, 0x45, 0x55, 0x45, 0x10, 0x02, 0x12, 0x16, 0x0a, 0x12, 0x48, 0x55, 0x42, 0x42, 0x4c, - 0x45, 0x5f, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x42, 0x55, 0x46, 0x46, 0x45, 0x52, 0x10, 0x03, 0x2a, - 0xae, 0x02, 0x0a, 0x0e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x47, 0x45, 0x4e, 0x54, 0x5f, 0x45, 0x56, 0x45, 0x4e, - 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x41, - 0x47, 0x45, 0x4e, 0x54, 0x5f, 0x53, 0x54, 0x41, 0x52, 0x54, 0x45, 0x44, 0x10, 0x02, 0x12, 0x12, - 0x0a, 0x0e, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x44, - 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x44, 0x45, 0x4c, - 0x45, 0x54, 0x45, 0x44, 0x10, 0x04, 0x12, 0x1f, 0x0a, 0x1b, 0x45, 0x4e, 0x44, 0x50, 0x4f, 0x49, - 0x4e, 0x54, 0x5f, 0x52, 0x45, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, 0x54, 0x45, 0x5f, 0x53, 0x55, - 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x05, 0x12, 0x1f, 0x0a, 0x1b, 0x45, 0x4e, 0x44, 0x50, 0x4f, - 0x49, 0x4e, 0x54, 0x5f, 0x52, 0x45, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, 0x54, 0x45, 0x5f, 0x46, - 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x06, 0x12, 0x14, 0x0a, 0x10, 0x45, 0x4e, 0x44, 0x50, - 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x07, 0x12, 0x14, - 0x0a, 0x10, 0x45, 0x4e, 0x44, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, - 0x45, 0x44, 0x10, 0x08, 0x12, 0x14, 0x0a, 0x10, 0x49, 0x50, 0x43, 0x41, 0x43, 0x48, 0x45, 0x5f, - 0x55, 0x50, 0x53, 0x45, 0x52, 0x54, 0x45, 0x44, 0x10, 0x09, 0x12, 0x13, 0x0a, 0x0f, 0x49, 0x50, - 0x43, 0x41, 0x43, 0x48, 0x45, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x0a, 0x12, - 0x14, 0x0a, 0x10, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x55, 0x50, 0x53, 0x45, 0x52, - 0x54, 0x45, 0x44, 0x10, 0x0b, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, - 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x0c, 0x22, 0x04, 0x08, 0x01, 0x10, 0x01, - 0x2a, 0xd8, 0x01, 0x0a, 0x16, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x72, 0x61, 0x6e, 0x73, - 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x18, 0x53, - 0x4f, 0x43, 0x4b, 0x5f, 0x58, 0x4c, 0x41, 0x54, 0x45, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, - 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x26, 0x0a, 0x22, 0x53, 0x4f, 0x43, - 0x4b, 0x5f, 0x58, 0x4c, 0x41, 0x54, 0x45, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x50, 0x52, - 0x45, 0x5f, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x57, 0x44, 0x10, - 0x01, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x4f, 0x43, 0x4b, 0x5f, 0x58, 0x4c, 0x41, 0x54, 0x45, 0x5f, - 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x50, 0x4f, 0x53, 0x54, 0x5f, 0x44, 0x49, 0x52, 0x45, 0x43, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x57, 0x44, 0x10, 0x02, 0x12, 
0x26, 0x0a, 0x22, 0x53, 0x4f, - 0x43, 0x4b, 0x5f, 0x58, 0x4c, 0x41, 0x54, 0x45, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x50, - 0x52, 0x45, 0x5f, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x56, - 0x10, 0x03, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x4f, 0x43, 0x4b, 0x5f, 0x58, 0x4c, 0x41, 0x54, 0x45, - 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x50, 0x4f, 0x53, 0x54, 0x5f, 0x44, 0x49, 0x52, 0x45, - 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x56, 0x10, 0x04, 0x2a, 0x81, 0x0d, 0x0a, 0x0e, - 0x44, 0x65, 0x62, 0x75, 0x67, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x15, - 0x0a, 0x11, 0x44, 0x42, 0x47, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, - 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x42, 0x47, 0x5f, 0x47, 0x45, 0x4e, - 0x45, 0x52, 0x49, 0x43, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x4f, - 0x43, 0x41, 0x4c, 0x5f, 0x44, 0x45, 0x4c, 0x49, 0x56, 0x45, 0x52, 0x59, 0x10, 0x02, 0x12, 0x0d, - 0x0a, 0x09, 0x44, 0x42, 0x47, 0x5f, 0x45, 0x4e, 0x43, 0x41, 0x50, 0x10, 0x03, 0x12, 0x11, 0x0a, - 0x0d, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x58, 0x43, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x04, - 0x12, 0x15, 0x0a, 0x11, 0x44, 0x42, 0x47, 0x5f, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x44, - 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x05, 0x12, 0x11, 0x0a, 0x0d, 0x44, 0x42, 0x47, 0x5f, 0x43, - 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x42, - 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x52, 0x45, 0x56, 0x10, - 0x07, 0x12, 0x10, 0x0a, 0x0c, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4d, 0x41, 0x54, 0x43, - 0x48, 0x10, 0x08, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x43, 0x52, - 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x09, 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x42, 0x47, 0x5f, 0x43, - 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x32, 0x10, 0x0a, 0x12, 0x14, 0x0a, 0x10, - 0x44, 0x42, 0x47, 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x36, 0x5f, 0x48, 0x41, 0x4e, 0x44, 0x4c, 0x45, - 0x10, 0x0b, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x36, 0x5f, - 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0x0c, 0x12, 0x10, 0x0a, 0x0c, 0x44, 0x42, 0x47, - 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x36, 0x5f, 0x4e, 0x53, 0x10, 0x0d, 0x12, 0x1b, 0x0a, 0x17, 0x44, - 0x42, 0x47, 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x36, 0x5f, 0x54, 0x49, 0x4d, 0x45, 0x5f, 0x45, 0x58, - 0x43, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, 0x0e, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x42, 0x47, 0x5f, - 0x43, 0x54, 0x5f, 0x56, 0x45, 0x52, 0x44, 0x49, 0x43, 0x54, 0x10, 0x0f, 0x12, 0x0d, 0x0a, 0x09, - 0x44, 0x42, 0x47, 0x5f, 0x44, 0x45, 0x43, 0x41, 0x50, 0x10, 0x10, 0x12, 0x10, 0x0a, 0x0c, 0x44, - 0x42, 0x47, 0x5f, 0x50, 0x4f, 0x52, 0x54, 0x5f, 0x4d, 0x41, 0x50, 0x10, 0x11, 0x12, 0x11, 0x0a, - 0x0d, 0x44, 0x42, 0x47, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x52, 0x45, 0x54, 0x10, 0x12, - 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x42, 0x47, 0x5f, 0x54, 0x4f, 0x5f, 0x48, 0x4f, 0x53, 0x54, 0x10, - 0x13, 0x12, 0x10, 0x0a, 0x0c, 0x44, 0x42, 0x47, 0x5f, 0x54, 0x4f, 0x5f, 0x53, 0x54, 0x41, 0x43, - 0x4b, 0x10, 0x14, 0x12, 0x10, 0x0a, 0x0c, 0x44, 0x42, 0x47, 0x5f, 0x50, 0x4b, 0x54, 0x5f, 0x48, - 0x41, 0x53, 0x48, 0x10, 0x15, 0x12, 0x1b, 0x0a, 0x17, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, - 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x46, 0x52, 0x4f, 0x4e, 0x54, 0x45, 0x4e, 0x44, - 0x10, 0x16, 0x12, 0x20, 0x0a, 0x1c, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, 0x5f, 0x4c, 
0x4f, - 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x46, 0x52, 0x4f, 0x4e, 0x54, 0x45, 0x4e, 0x44, 0x5f, 0x46, 0x41, - 0x49, 0x4c, 0x10, 0x17, 0x12, 0x1f, 0x0a, 0x1b, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, 0x5f, - 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53, - 0x4c, 0x4f, 0x54, 0x10, 0x18, 0x12, 0x27, 0x0a, 0x23, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, + 0x59, 0x5f, 0x50, 0x52, 0x45, 0x10, 0x08, 0x12, 0x1a, 0x0a, 0x16, 0x44, 0x42, 0x47, 0x5f, 0x43, + 0x41, 0x50, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x50, 0x4f, 0x53, + 0x54, 0x10, 0x09, 0x12, 0x18, 0x0a, 0x14, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, + 0x52, 0x45, 0x5f, 0x53, 0x4e, 0x41, 0x54, 0x5f, 0x50, 0x52, 0x45, 0x10, 0x0a, 0x12, 0x19, 0x0a, + 0x15, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x53, 0x4e, 0x41, + 0x54, 0x5f, 0x50, 0x4f, 0x53, 0x54, 0x10, 0x0b, 0x22, 0x04, 0x08, 0x01, 0x10, 0x03, 0x2a, 0x39, + 0x0a, 0x09, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x52, 0x65, 0x63, + 0x6f, 0x72, 0x64, 0x4c, 0x6f, 0x73, 0x74, 0x10, 0x02, 0x2a, 0x7f, 0x0a, 0x0f, 0x4c, 0x6f, 0x73, + 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x19, + 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x4c, 0x4f, 0x53, 0x54, 0x5f, 0x45, 0x56, 0x45, + 0x4e, 0x54, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x50, + 0x45, 0x52, 0x46, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x42, + 0x55, 0x46, 0x46, 0x45, 0x52, 0x10, 0x01, 0x12, 0x19, 0x0a, 0x15, 0x4f, 0x42, 0x53, 0x45, 0x52, + 0x56, 0x45, 0x52, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x53, 0x5f, 0x51, 0x55, 0x45, 0x55, 0x45, + 0x10, 0x02, 0x12, 0x16, 0x0a, 0x12, 0x48, 0x55, 0x42, 0x42, 0x4c, 0x45, 0x5f, 0x52, 0x49, 0x4e, + 0x47, 0x5f, 0x42, 0x55, 0x46, 0x46, 0x45, 0x52, 0x10, 0x03, 0x2a, 0xae, 0x02, 0x0a, 0x0e, 0x41, + 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, + 0x13, 0x41, 0x47, 0x45, 0x4e, 0x54, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x4b, + 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x41, 0x47, 0x45, 0x4e, 0x54, 0x5f, + 0x53, 0x54, 0x41, 0x52, 0x54, 0x45, 0x44, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x50, 0x4f, 0x4c, + 0x49, 0x43, 0x59, 0x5f, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, + 0x0e, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, + 0x04, 0x12, 0x1f, 0x0a, 0x1b, 0x45, 0x4e, 0x44, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x52, 0x45, + 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, 0x54, 0x45, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, + 0x10, 0x05, 0x12, 0x1f, 0x0a, 0x1b, 0x45, 0x4e, 0x44, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x52, + 0x45, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, 0x54, 0x45, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, + 0x45, 0x10, 0x06, 0x12, 0x14, 0x0a, 0x10, 0x45, 0x4e, 0x44, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, + 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x07, 0x12, 0x14, 0x0a, 0x10, 0x45, 0x4e, 0x44, + 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x08, 0x12, + 0x14, 0x0a, 0x10, 0x49, 0x50, 0x43, 0x41, 0x43, 0x48, 0x45, 0x5f, 0x55, 0x50, 0x53, 0x45, 0x52, + 0x54, 0x45, 0x44, 
0x10, 0x09, 0x12, 0x13, 0x0a, 0x0f, 0x49, 0x50, 0x43, 0x41, 0x43, 0x48, 0x45, + 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x0a, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x45, + 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x55, 0x50, 0x53, 0x45, 0x52, 0x54, 0x45, 0x44, 0x10, 0x0b, + 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x44, 0x45, 0x4c, 0x45, + 0x54, 0x45, 0x44, 0x10, 0x0c, 0x22, 0x04, 0x08, 0x01, 0x10, 0x01, 0x2a, 0xd8, 0x01, 0x0a, 0x16, + 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x4f, 0x43, 0x4b, 0x5f, 0x58, + 0x4c, 0x41, 0x54, 0x45, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, + 0x57, 0x4e, 0x10, 0x00, 0x12, 0x26, 0x0a, 0x22, 0x53, 0x4f, 0x43, 0x4b, 0x5f, 0x58, 0x4c, 0x41, + 0x54, 0x45, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x50, 0x52, 0x45, 0x5f, 0x44, 0x49, 0x52, + 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x57, 0x44, 0x10, 0x01, 0x12, 0x27, 0x0a, 0x23, + 0x53, 0x4f, 0x43, 0x4b, 0x5f, 0x58, 0x4c, 0x41, 0x54, 0x45, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, + 0x5f, 0x50, 0x4f, 0x53, 0x54, 0x5f, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x46, 0x57, 0x44, 0x10, 0x02, 0x12, 0x26, 0x0a, 0x22, 0x53, 0x4f, 0x43, 0x4b, 0x5f, 0x58, 0x4c, + 0x41, 0x54, 0x45, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x50, 0x52, 0x45, 0x5f, 0x44, 0x49, + 0x52, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x56, 0x10, 0x03, 0x12, 0x27, 0x0a, + 0x23, 0x53, 0x4f, 0x43, 0x4b, 0x5f, 0x58, 0x4c, 0x41, 0x54, 0x45, 0x5f, 0x50, 0x4f, 0x49, 0x4e, + 0x54, 0x5f, 0x50, 0x4f, 0x53, 0x54, 0x5f, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x52, 0x45, 0x56, 0x10, 0x04, 0x2a, 0x81, 0x0d, 0x0a, 0x0e, 0x44, 0x65, 0x62, 0x75, 0x67, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x42, 0x47, + 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, + 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x42, 0x47, 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x49, 0x43, 0x10, + 0x01, 0x12, 0x16, 0x0a, 0x12, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x4f, 0x43, 0x41, 0x4c, 0x5f, 0x44, + 0x45, 0x4c, 0x49, 0x56, 0x45, 0x52, 0x59, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x42, 0x47, + 0x5f, 0x45, 0x4e, 0x43, 0x41, 0x50, 0x10, 0x03, 0x12, 0x11, 0x0a, 0x0d, 0x44, 0x42, 0x47, 0x5f, + 0x4c, 0x58, 0x43, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x44, + 0x42, 0x47, 0x5f, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, + 0x10, 0x05, 0x12, 0x11, 0x0a, 0x0d, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, + 0x4b, 0x55, 0x50, 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, + 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x52, 0x45, 0x56, 0x10, 0x07, 0x12, 0x10, 0x0a, 0x0c, + 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x08, 0x12, 0x12, + 0x0a, 0x0e, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, + 0x10, 0x09, 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x43, 0x52, 0x45, + 0x41, 0x54, 0x45, 0x44, 0x32, 0x10, 0x0a, 0x12, 0x14, 0x0a, 0x10, 0x44, 0x42, 0x47, 0x5f, 0x49, + 0x43, 0x4d, 0x50, 0x36, 0x5f, 0x48, 0x41, 0x4e, 0x44, 0x4c, 0x45, 0x10, 0x0b, 0x12, 0x15, 0x0a, + 0x11, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x36, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, + 0x53, 0x54, 0x10, 0x0c, 0x12, 0x10, 0x0a, 
0x0c, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x43, 0x4d, 0x50, + 0x36, 0x5f, 0x4e, 0x53, 0x10, 0x0d, 0x12, 0x1b, 0x0a, 0x17, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x43, + 0x4d, 0x50, 0x36, 0x5f, 0x54, 0x49, 0x4d, 0x45, 0x5f, 0x45, 0x58, 0x43, 0x45, 0x45, 0x44, 0x45, + 0x44, 0x10, 0x0e, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x56, 0x45, + 0x52, 0x44, 0x49, 0x43, 0x54, 0x10, 0x0f, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x42, 0x47, 0x5f, 0x44, + 0x45, 0x43, 0x41, 0x50, 0x10, 0x10, 0x12, 0x10, 0x0a, 0x0c, 0x44, 0x42, 0x47, 0x5f, 0x50, 0x4f, + 0x52, 0x54, 0x5f, 0x4d, 0x41, 0x50, 0x10, 0x11, 0x12, 0x11, 0x0a, 0x0d, 0x44, 0x42, 0x47, 0x5f, + 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x52, 0x45, 0x54, 0x10, 0x12, 0x12, 0x0f, 0x0a, 0x0b, 0x44, + 0x42, 0x47, 0x5f, 0x54, 0x4f, 0x5f, 0x48, 0x4f, 0x53, 0x54, 0x10, 0x13, 0x12, 0x10, 0x0a, 0x0c, + 0x44, 0x42, 0x47, 0x5f, 0x54, 0x4f, 0x5f, 0x53, 0x54, 0x41, 0x43, 0x4b, 0x10, 0x14, 0x12, 0x10, + 0x0a, 0x0c, 0x44, 0x42, 0x47, 0x5f, 0x50, 0x4b, 0x54, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x10, 0x15, + 0x12, 0x1b, 0x0a, 0x17, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, + 0x55, 0x50, 0x5f, 0x46, 0x52, 0x4f, 0x4e, 0x54, 0x45, 0x4e, 0x44, 0x10, 0x16, 0x12, 0x20, 0x0a, + 0x1c, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, + 0x46, 0x52, 0x4f, 0x4e, 0x54, 0x45, 0x4e, 0x44, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x10, 0x17, 0x12, + 0x1f, 0x0a, 0x1b, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, + 0x50, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53, 0x4c, 0x4f, 0x54, 0x10, 0x18, + 0x12, 0x27, 0x0a, 0x23, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, + 0x55, 0x50, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53, 0x4c, 0x4f, 0x54, 0x5f, + 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x19, 0x12, 0x27, 0x0a, 0x23, 0x44, 0x42, 0x47, + 0x5f, 0x4c, 0x42, 0x36, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x42, 0x41, 0x43, 0x4b, + 0x45, 0x4e, 0x44, 0x5f, 0x53, 0x4c, 0x4f, 0x54, 0x5f, 0x56, 0x32, 0x5f, 0x46, 0x41, 0x49, 0x4c, + 0x10, 0x1a, 0x12, 0x1f, 0x0a, 0x1b, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, 0x5f, 0x4c, 0x4f, + 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x46, 0x41, 0x49, + 0x4c, 0x10, 0x1b, 0x12, 0x1e, 0x0a, 0x1a, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, 0x5f, 0x52, + 0x45, 0x56, 0x45, 0x52, 0x53, 0x45, 0x5f, 0x4e, 0x41, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, + 0x50, 0x10, 0x1c, 0x12, 0x17, 0x0a, 0x13, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, 0x5f, 0x52, + 0x45, 0x56, 0x45, 0x52, 0x53, 0x45, 0x5f, 0x4e, 0x41, 0x54, 0x10, 0x1d, 0x12, 0x1b, 0x0a, 0x17, + 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x46, + 0x52, 0x4f, 0x4e, 0x54, 0x45, 0x4e, 0x44, 0x10, 0x1e, 0x12, 0x20, 0x0a, 0x1c, 0x44, 0x42, 0x47, + 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x46, 0x52, 0x4f, 0x4e, + 0x54, 0x45, 0x4e, 0x44, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x10, 0x1f, 0x12, 0x1f, 0x0a, 0x1b, 0x44, + 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x42, 0x41, + 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53, 0x4c, 0x4f, 0x54, 0x10, 0x20, 0x12, 0x27, 0x0a, 0x23, + 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x42, + 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53, 0x4c, 0x4f, 0x54, 0x5f, 0x53, 0x55, 0x43, 0x43, + 0x45, 0x53, 0x53, 0x10, 0x21, 0x12, 0x27, 0x0a, 0x23, 0x44, 0x42, 
0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, - 0x53, 0x4c, 0x4f, 0x54, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x19, 0x12, 0x27, - 0x0a, 0x23, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, - 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53, 0x4c, 0x4f, 0x54, 0x5f, 0x56, 0x32, - 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x10, 0x1a, 0x12, 0x1f, 0x0a, 0x1b, 0x44, 0x42, 0x47, 0x5f, 0x4c, - 0x42, 0x36, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, - 0x44, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x10, 0x1b, 0x12, 0x1e, 0x0a, 0x1a, 0x44, 0x42, 0x47, 0x5f, - 0x4c, 0x42, 0x36, 0x5f, 0x52, 0x45, 0x56, 0x45, 0x52, 0x53, 0x45, 0x5f, 0x4e, 0x41, 0x54, 0x5f, - 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x10, 0x1c, 0x12, 0x17, 0x0a, 0x13, 0x44, 0x42, 0x47, 0x5f, - 0x4c, 0x42, 0x36, 0x5f, 0x52, 0x45, 0x56, 0x45, 0x52, 0x53, 0x45, 0x5f, 0x4e, 0x41, 0x54, 0x10, - 0x1d, 0x12, 0x1b, 0x0a, 0x17, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, - 0x4b, 0x55, 0x50, 0x5f, 0x46, 0x52, 0x4f, 0x4e, 0x54, 0x45, 0x4e, 0x44, 0x10, 0x1e, 0x12, 0x20, - 0x0a, 0x1c, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, - 0x5f, 0x46, 0x52, 0x4f, 0x4e, 0x54, 0x45, 0x4e, 0x44, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x10, 0x1f, - 0x12, 0x1f, 0x0a, 0x1b, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, - 0x55, 0x50, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53, 0x4c, 0x4f, 0x54, 0x10, - 0x20, 0x12, 0x27, 0x0a, 0x23, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, - 0x4b, 0x55, 0x50, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53, 0x4c, 0x4f, 0x54, - 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x21, 0x12, 0x27, 0x0a, 0x23, 0x44, 0x42, - 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x42, 0x41, 0x43, - 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53, 0x4c, 0x4f, 0x54, 0x5f, 0x56, 0x32, 0x5f, 0x46, 0x41, 0x49, - 0x4c, 0x10, 0x22, 0x12, 0x1f, 0x0a, 0x1b, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, - 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x46, 0x41, - 0x49, 0x4c, 0x10, 0x23, 0x12, 0x1e, 0x0a, 0x1a, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, - 0x52, 0x45, 0x56, 0x45, 0x52, 0x53, 0x45, 0x5f, 0x4e, 0x41, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, - 0x55, 0x50, 0x10, 0x24, 0x12, 0x17, 0x0a, 0x13, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, - 0x52, 0x45, 0x56, 0x45, 0x52, 0x53, 0x45, 0x5f, 0x4e, 0x41, 0x54, 0x10, 0x25, 0x12, 0x19, 0x0a, - 0x15, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, 0x50, 0x42, 0x41, 0x43, - 0x4b, 0x5f, 0x53, 0x4e, 0x41, 0x54, 0x10, 0x26, 0x12, 0x1d, 0x0a, 0x19, 0x44, 0x42, 0x47, 0x5f, + 0x53, 0x4c, 0x4f, 0x54, 0x5f, 0x56, 0x32, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x10, 0x22, 0x12, 0x1f, + 0x0a, 0x1b, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, + 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x10, 0x23, 0x12, + 0x1e, 0x0a, 0x1a, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x52, 0x45, 0x56, 0x45, 0x52, + 0x53, 0x45, 0x5f, 0x4e, 0x41, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x10, 0x24, 0x12, + 0x17, 0x0a, 0x13, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x52, 0x45, 0x56, 0x45, 0x52, + 0x53, 0x45, 0x5f, 0x4e, 0x41, 0x54, 0x10, 0x25, 0x12, 0x19, 0x0a, 0x15, 0x44, 0x42, 0x47, 
0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, 0x50, 0x42, 0x41, 0x43, 0x4b, 0x5f, 0x53, 0x4e, 0x41, - 0x54, 0x5f, 0x52, 0x45, 0x56, 0x10, 0x27, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x42, 0x47, 0x5f, 0x43, - 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x34, 0x10, 0x28, 0x12, 0x1b, 0x0a, 0x17, 0x44, - 0x42, 0x47, 0x5f, 0x52, 0x52, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53, 0x4c, - 0x4f, 0x54, 0x5f, 0x53, 0x45, 0x4c, 0x10, 0x29, 0x12, 0x18, 0x0a, 0x14, 0x44, 0x42, 0x47, 0x5f, - 0x52, 0x45, 0x56, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, - 0x10, 0x2a, 0x12, 0x17, 0x0a, 0x13, 0x44, 0x42, 0x47, 0x5f, 0x52, 0x45, 0x56, 0x5f, 0x50, 0x52, - 0x4f, 0x58, 0x59, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x2b, 0x12, 0x18, 0x0a, 0x14, 0x44, - 0x42, 0x47, 0x5f, 0x52, 0x45, 0x56, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x55, 0x50, 0x44, - 0x41, 0x54, 0x45, 0x10, 0x2c, 0x12, 0x11, 0x0a, 0x0d, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x34, 0x5f, - 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x10, 0x2d, 0x12, 0x19, 0x0a, 0x15, 0x44, 0x42, 0x47, 0x5f, - 0x4e, 0x45, 0x54, 0x44, 0x45, 0x56, 0x5f, 0x49, 0x4e, 0x5f, 0x43, 0x4c, 0x55, 0x53, 0x54, 0x45, - 0x52, 0x10, 0x2e, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x42, 0x47, 0x5f, 0x4e, 0x45, 0x54, 0x44, 0x45, - 0x56, 0x5f, 0x45, 0x4e, 0x43, 0x41, 0x50, 0x34, 0x10, 0x2f, 0x12, 0x14, 0x0a, 0x10, 0x44, 0x42, - 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x34, 0x5f, 0x31, 0x10, 0x30, - 0x12, 0x14, 0x0a, 0x10, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, - 0x50, 0x34, 0x5f, 0x32, 0x10, 0x31, 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, - 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x34, 0x10, 0x32, 0x12, 0x14, 0x0a, 0x10, 0x44, - 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x36, 0x5f, 0x31, 0x10, - 0x33, 0x12, 0x14, 0x0a, 0x10, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, - 0x55, 0x50, 0x36, 0x5f, 0x32, 0x10, 0x34, 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x42, 0x47, 0x5f, 0x43, - 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x36, 0x10, 0x35, 0x12, 0x12, 0x0a, 0x0e, - 0x44, 0x42, 0x47, 0x5f, 0x53, 0x4b, 0x49, 0x50, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x10, 0x36, - 0x12, 0x11, 0x0a, 0x0d, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x34, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, - 0x45, 0x10, 0x37, 0x12, 0x19, 0x0a, 0x15, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x50, 0x5f, 0x49, 0x44, - 0x5f, 0x4d, 0x41, 0x50, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x34, 0x10, 0x38, 0x12, 0x19, + 0x54, 0x10, 0x26, 0x12, 0x1d, 0x0a, 0x19, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, + 0x4f, 0x4f, 0x50, 0x42, 0x41, 0x43, 0x4b, 0x5f, 0x53, 0x4e, 0x41, 0x54, 0x5f, 0x52, 0x45, 0x56, + 0x10, 0x27, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, + 0x4b, 0x55, 0x50, 0x34, 0x10, 0x28, 0x12, 0x1b, 0x0a, 0x17, 0x44, 0x42, 0x47, 0x5f, 0x52, 0x52, + 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53, 0x4c, 0x4f, 0x54, 0x5f, 0x53, 0x45, + 0x4c, 0x10, 0x29, 0x12, 0x18, 0x0a, 0x14, 0x44, 0x42, 0x47, 0x5f, 0x52, 0x45, 0x56, 0x5f, 0x50, + 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x10, 0x2a, 0x12, 0x17, 0x0a, + 0x13, 0x44, 0x42, 0x47, 0x5f, 0x52, 0x45, 0x56, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x46, + 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x2b, 0x12, 0x18, 0x0a, 0x14, 0x44, 0x42, 0x47, 0x5f, 0x52, 0x45, + 0x56, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x10, 0x2c, + 0x12, 0x11, 0x0a, 
0x0d, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x34, 0x5f, 0x50, 0x4f, 0x4c, 0x49, 0x43, + 0x59, 0x10, 0x2d, 0x12, 0x19, 0x0a, 0x15, 0x44, 0x42, 0x47, 0x5f, 0x4e, 0x45, 0x54, 0x44, 0x45, + 0x56, 0x5f, 0x49, 0x4e, 0x5f, 0x43, 0x4c, 0x55, 0x53, 0x54, 0x45, 0x52, 0x10, 0x2e, 0x12, 0x15, + 0x0a, 0x11, 0x44, 0x42, 0x47, 0x5f, 0x4e, 0x45, 0x54, 0x44, 0x45, 0x56, 0x5f, 0x45, 0x4e, 0x43, + 0x41, 0x50, 0x34, 0x10, 0x2f, 0x12, 0x14, 0x0a, 0x10, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, + 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x34, 0x5f, 0x31, 0x10, 0x30, 0x12, 0x14, 0x0a, 0x10, 0x44, + 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x34, 0x5f, 0x32, 0x10, + 0x31, 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41, + 0x54, 0x45, 0x44, 0x34, 0x10, 0x32, 0x12, 0x14, 0x0a, 0x10, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, + 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x36, 0x5f, 0x31, 0x10, 0x33, 0x12, 0x14, 0x0a, 0x10, + 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x36, 0x5f, 0x32, + 0x10, 0x34, 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x43, 0x52, 0x45, + 0x41, 0x54, 0x45, 0x44, 0x36, 0x10, 0x35, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x42, 0x47, 0x5f, 0x53, + 0x4b, 0x49, 0x50, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x10, 0x36, 0x12, 0x11, 0x0a, 0x0d, 0x44, + 0x42, 0x47, 0x5f, 0x4c, 0x34, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x10, 0x37, 0x12, 0x19, 0x0a, 0x15, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x50, 0x5f, 0x49, 0x44, 0x5f, 0x4d, 0x41, 0x50, 0x5f, - 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x36, 0x10, 0x39, 0x12, 0x1a, 0x0a, 0x16, 0x44, 0x42, 0x47, - 0x5f, 0x49, 0x50, 0x5f, 0x49, 0x44, 0x5f, 0x4d, 0x41, 0x50, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, - 0x45, 0x44, 0x34, 0x10, 0x3a, 0x12, 0x1a, 0x0a, 0x16, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x50, 0x5f, - 0x49, 0x44, 0x5f, 0x4d, 0x41, 0x50, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x45, 0x44, 0x36, 0x10, - 0x3b, 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x5f, 0x53, 0x54, 0x41, 0x4c, - 0x45, 0x5f, 0x43, 0x54, 0x10, 0x3c, 0x12, 0x18, 0x0a, 0x14, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x4e, - 0x48, 0x45, 0x52, 0x49, 0x54, 0x5f, 0x49, 0x44, 0x45, 0x4e, 0x54, 0x49, 0x54, 0x59, 0x10, 0x3d, - 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x42, 0x47, 0x5f, 0x53, 0x4b, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, - 0x50, 0x34, 0x10, 0x3e, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x42, 0x47, 0x5f, 0x53, 0x4b, 0x5f, 0x4c, - 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x36, 0x10, 0x3f, 0x12, 0x11, 0x0a, 0x0d, 0x44, 0x42, 0x47, 0x5f, - 0x53, 0x4b, 0x5f, 0x41, 0x53, 0x53, 0x49, 0x47, 0x4e, 0x10, 0x40, 0x12, 0x0d, 0x0a, 0x09, 0x44, - 0x42, 0x47, 0x5f, 0x4c, 0x37, 0x5f, 0x4c, 0x42, 0x10, 0x41, 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x42, - 0x47, 0x5f, 0x53, 0x4b, 0x49, 0x50, 0x5f, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x10, 0x42, 0x42, - 0x26, 0x5a, 0x24, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x69, - 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x2f, - 0x76, 0x31, 0x2f, 0x66, 0x6c, 0x6f, 0x77, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x34, 0x10, 0x38, 0x12, 0x19, 0x0a, 0x15, 0x44, 0x42, 0x47, + 0x5f, 0x49, 0x50, 0x5f, 0x49, 0x44, 0x5f, 0x4d, 0x41, 0x50, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, + 0x44, 0x36, 0x10, 0x39, 0x12, 0x1a, 0x0a, 0x16, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x50, 0x5f, 0x49, + 0x44, 0x5f, 0x4d, 0x41, 0x50, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x45, 0x44, 0x34, 0x10, 0x3a, + 0x12, 0x1a, 0x0a, 0x16, 0x44, 0x42, 0x47, 0x5f, 
0x49, 0x50, 0x5f, 0x49, 0x44, 0x5f, 0x4d, 0x41, + 0x50, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x45, 0x44, 0x36, 0x10, 0x3b, 0x12, 0x13, 0x0a, 0x0f, + 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x5f, 0x53, 0x54, 0x41, 0x4c, 0x45, 0x5f, 0x43, 0x54, 0x10, + 0x3c, 0x12, 0x18, 0x0a, 0x14, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x4e, 0x48, 0x45, 0x52, 0x49, 0x54, + 0x5f, 0x49, 0x44, 0x45, 0x4e, 0x54, 0x49, 0x54, 0x59, 0x10, 0x3d, 0x12, 0x12, 0x0a, 0x0e, 0x44, + 0x42, 0x47, 0x5f, 0x53, 0x4b, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x34, 0x10, 0x3e, 0x12, + 0x12, 0x0a, 0x0e, 0x44, 0x42, 0x47, 0x5f, 0x53, 0x4b, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, + 0x36, 0x10, 0x3f, 0x12, 0x11, 0x0a, 0x0d, 0x44, 0x42, 0x47, 0x5f, 0x53, 0x4b, 0x5f, 0x41, 0x53, + 0x53, 0x49, 0x47, 0x4e, 0x10, 0x40, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x37, + 0x5f, 0x4c, 0x42, 0x10, 0x41, 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x42, 0x47, 0x5f, 0x53, 0x4b, 0x49, + 0x50, 0x5f, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x10, 0x42, 0x42, 0x26, 0x5a, 0x24, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, + 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x66, 0x6c, + 0x6f, 0x77, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -5553,36 +5571,37 @@ var file_flow_flow_proto_depIdxs = []int32{ 7, // 39: flow.FlowFilter.traffic_direction:type_name -> flow.TrafficDirection 5, // 40: flow.FlowFilter.verdict:type_name -> flow.Verdict 30, // 41: flow.FlowFilter.event_type:type_name -> flow.EventTypeFilter - 24, // 42: flow.FlowFilter.tcp_flags:type_name -> flow.TCPFlags - 4, // 43: flow.FlowFilter.ip_version:type_name -> flow.IPVersion - 34, // 44: flow.HTTP.headers:type_name -> flow.HTTPHeader - 10, // 45: flow.LostEvent.source:type_name -> flow.LostEventSource - 54, // 46: flow.LostEvent.cpu:type_name -> google.protobuf.Int32Value - 11, // 47: flow.AgentEvent.type:type_name -> flow.AgentEventType - 40, // 48: flow.AgentEvent.unknown:type_name -> flow.AgentEventUnknown - 41, // 49: flow.AgentEvent.agent_start:type_name -> flow.TimeNotification - 42, // 50: flow.AgentEvent.policy_update:type_name -> flow.PolicyUpdateNotification - 43, // 51: flow.AgentEvent.endpoint_regenerate:type_name -> flow.EndpointRegenNotification - 44, // 52: flow.AgentEvent.endpoint_update:type_name -> flow.EndpointUpdateNotification - 45, // 53: flow.AgentEvent.ipcache_update:type_name -> flow.IPCacheNotification - 47, // 54: flow.AgentEvent.service_upsert:type_name -> flow.ServiceUpsertNotification - 48, // 55: flow.AgentEvent.service_delete:type_name -> flow.ServiceDeleteNotification - 51, // 56: flow.TimeNotification.time:type_name -> google.protobuf.Timestamp - 55, // 57: flow.IPCacheNotification.old_identity:type_name -> google.protobuf.UInt32Value - 46, // 58: flow.ServiceUpsertNotification.frontend_address:type_name -> flow.ServiceUpsertNotificationAddr - 46, // 59: flow.ServiceUpsertNotification.backend_addresses:type_name -> flow.ServiceUpsertNotificationAddr - 13, // 60: flow.DebugEvent.type:type_name -> flow.DebugEventType - 19, // 61: flow.DebugEvent.source:type_name -> flow.Endpoint - 55, // 62: flow.DebugEvent.hash:type_name -> google.protobuf.UInt32Value - 55, // 63: flow.DebugEvent.arg1:type_name -> google.protobuf.UInt32Value - 55, // 64: flow.DebugEvent.arg2:type_name -> google.protobuf.UInt32Value - 55, // 65: flow.DebugEvent.arg3:type_name -> google.protobuf.UInt32Value - 54, // 66: flow.DebugEvent.cpu:type_name -> google.protobuf.Int32Value - 67, // 
[67:67] is the sub-list for method output_type - 67, // [67:67] is the sub-list for method input_type - 67, // [67:67] is the sub-list for extension type_name - 67, // [67:67] is the sub-list for extension extendee - 0, // [0:67] is the sub-list for field type_name + 34, // 42: flow.FlowFilter.http_header:type_name -> flow.HTTPHeader + 24, // 43: flow.FlowFilter.tcp_flags:type_name -> flow.TCPFlags + 4, // 44: flow.FlowFilter.ip_version:type_name -> flow.IPVersion + 34, // 45: flow.HTTP.headers:type_name -> flow.HTTPHeader + 10, // 46: flow.LostEvent.source:type_name -> flow.LostEventSource + 54, // 47: flow.LostEvent.cpu:type_name -> google.protobuf.Int32Value + 11, // 48: flow.AgentEvent.type:type_name -> flow.AgentEventType + 40, // 49: flow.AgentEvent.unknown:type_name -> flow.AgentEventUnknown + 41, // 50: flow.AgentEvent.agent_start:type_name -> flow.TimeNotification + 42, // 51: flow.AgentEvent.policy_update:type_name -> flow.PolicyUpdateNotification + 43, // 52: flow.AgentEvent.endpoint_regenerate:type_name -> flow.EndpointRegenNotification + 44, // 53: flow.AgentEvent.endpoint_update:type_name -> flow.EndpointUpdateNotification + 45, // 54: flow.AgentEvent.ipcache_update:type_name -> flow.IPCacheNotification + 47, // 55: flow.AgentEvent.service_upsert:type_name -> flow.ServiceUpsertNotification + 48, // 56: flow.AgentEvent.service_delete:type_name -> flow.ServiceDeleteNotification + 51, // 57: flow.TimeNotification.time:type_name -> google.protobuf.Timestamp + 55, // 58: flow.IPCacheNotification.old_identity:type_name -> google.protobuf.UInt32Value + 46, // 59: flow.ServiceUpsertNotification.frontend_address:type_name -> flow.ServiceUpsertNotificationAddr + 46, // 60: flow.ServiceUpsertNotification.backend_addresses:type_name -> flow.ServiceUpsertNotificationAddr + 13, // 61: flow.DebugEvent.type:type_name -> flow.DebugEventType + 19, // 62: flow.DebugEvent.source:type_name -> flow.Endpoint + 55, // 63: flow.DebugEvent.hash:type_name -> google.protobuf.UInt32Value + 55, // 64: flow.DebugEvent.arg1:type_name -> google.protobuf.UInt32Value + 55, // 65: flow.DebugEvent.arg2:type_name -> google.protobuf.UInt32Value + 55, // 66: flow.DebugEvent.arg3:type_name -> google.protobuf.UInt32Value + 54, // 67: flow.DebugEvent.cpu:type_name -> google.protobuf.Int32Value + 68, // [68:68] is the sub-list for method output_type + 68, // [68:68] is the sub-list for method input_type + 68, // [68:68] is the sub-list for extension type_name + 68, // [68:68] is the sub-list for extension extendee + 0, // [0:68] is the sub-list for field type_name } func init() { file_flow_flow_proto_init() } diff --git a/vendor/github.com/cilium/cilium/api/v1/flow/flow.proto b/vendor/github.com/cilium/cilium/api/v1/flow/flow.proto index e2b1b2131b..ff8a9f4fc1 100644 --- a/vendor/github.com/cilium/cilium/api/v1/flow/flow.proto +++ b/vendor/github.com/cilium/cilium/api/v1/flow/flow.proto @@ -402,6 +402,7 @@ enum DropReason { UNENCRYPTED_TRAFFIC = 195; TTL_EXCEEDED = 196; NO_NODE_ID = 197; + DROP_RATE_LIMITED = 198; } enum TrafficDirection { @@ -470,7 +471,8 @@ message FlowFilter { // source_pod filters by a list of source pod name prefixes, optionally // within a given namespace (e.g. "xwing", "kube-system/coredns-"). // The pod name can be omitted to only filter by namespace - // (e.g. "kube-system/") + // (e.g. "kube-system/") or the namespace can be omitted to filter for + // pods in any namespace (e.g. 
"/xwing") repeated string source_pod = 2; // source_fqdn filters by a list of source fully qualified domain names repeated string source_fqdn = 7; @@ -533,6 +535,8 @@ message FlowFilter { repeated string http_path = 22; // http_url is a list of regular expressions to filter on the HTTP URL. repeated string http_url = 31; + // http_header is a list of key:value pairs to filter on the HTTP headers. + repeated HTTPHeader http_header = 32; // tcp_flags filters flows based on TCP header flags repeated TCPFlags tcp_flags = 23; diff --git a/vendor/github.com/cilium/cilium/api/v1/flow/flow_deepcopy.pb.go b/vendor/github.com/cilium/cilium/api/v1/flow/flow_deepcopy.pb.go deleted file mode 100644 index 94dc44316b..0000000000 --- a/vendor/github.com/cilium/cilium/api/v1/flow/flow_deepcopy.pb.go +++ /dev/null @@ -1,784 +0,0 @@ -// Code generated by protoc-gen-deepcopy. DO NOT EDIT. - -package flow - -import ( - proto "google.golang.org/protobuf/proto" -) - -// DeepCopyInto supports using Flow within kubernetes types, where deepcopy-gen is used. -func (in *Flow) DeepCopyInto(out *Flow) { - p := proto.Clone(in).(*Flow) - *out = *p -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Flow. Required by controller-gen. -func (in *Flow) DeepCopy() *Flow { - if in == nil { - return nil - } - out := new(Flow) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new Flow. Required by controller-gen. -func (in *Flow) DeepCopyInterface() interface{} { - return in.DeepCopy() -} - -// DeepCopyInto supports using Layer4 within kubernetes types, where deepcopy-gen is used. -func (in *Layer4) DeepCopyInto(out *Layer4) { - p := proto.Clone(in).(*Layer4) - *out = *p -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Layer4. Required by controller-gen. -func (in *Layer4) DeepCopy() *Layer4 { - if in == nil { - return nil - } - out := new(Layer4) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new Layer4. Required by controller-gen. -func (in *Layer4) DeepCopyInterface() interface{} { - return in.DeepCopy() -} - -// DeepCopyInto supports using Layer7 within kubernetes types, where deepcopy-gen is used. -func (in *Layer7) DeepCopyInto(out *Layer7) { - p := proto.Clone(in).(*Layer7) - *out = *p -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Layer7. Required by controller-gen. -func (in *Layer7) DeepCopy() *Layer7 { - if in == nil { - return nil - } - out := new(Layer7) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new Layer7. Required by controller-gen. -func (in *Layer7) DeepCopyInterface() interface{} { - return in.DeepCopy() -} - -// DeepCopyInto supports using TraceContext within kubernetes types, where deepcopy-gen is used. -func (in *TraceContext) DeepCopyInto(out *TraceContext) { - p := proto.Clone(in).(*TraceContext) - *out = *p -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TraceContext. Required by controller-gen. -func (in *TraceContext) DeepCopy() *TraceContext { - if in == nil { - return nil - } - out := new(TraceContext) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new TraceContext. 
Required by controller-gen. -func (in *TraceContext) DeepCopyInterface() interface{} { - return in.DeepCopy() -} - -// DeepCopyInto supports using TraceParent within kubernetes types, where deepcopy-gen is used. -func (in *TraceParent) DeepCopyInto(out *TraceParent) { - p := proto.Clone(in).(*TraceParent) - *out = *p -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TraceParent. Required by controller-gen. -func (in *TraceParent) DeepCopy() *TraceParent { - if in == nil { - return nil - } - out := new(TraceParent) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new TraceParent. Required by controller-gen. -func (in *TraceParent) DeepCopyInterface() interface{} { - return in.DeepCopy() -} - -// DeepCopyInto supports using Endpoint within kubernetes types, where deepcopy-gen is used. -func (in *Endpoint) DeepCopyInto(out *Endpoint) { - p := proto.Clone(in).(*Endpoint) - *out = *p -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint. Required by controller-gen. -func (in *Endpoint) DeepCopy() *Endpoint { - if in == nil { - return nil - } - out := new(Endpoint) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint. Required by controller-gen. -func (in *Endpoint) DeepCopyInterface() interface{} { - return in.DeepCopy() -} - -// DeepCopyInto supports using Workload within kubernetes types, where deepcopy-gen is used. -func (in *Workload) DeepCopyInto(out *Workload) { - p := proto.Clone(in).(*Workload) - *out = *p -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Workload. Required by controller-gen. -func (in *Workload) DeepCopy() *Workload { - if in == nil { - return nil - } - out := new(Workload) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new Workload. Required by controller-gen. -func (in *Workload) DeepCopyInterface() interface{} { - return in.DeepCopy() -} - -// DeepCopyInto supports using TCP within kubernetes types, where deepcopy-gen is used. -func (in *TCP) DeepCopyInto(out *TCP) { - p := proto.Clone(in).(*TCP) - *out = *p -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCP. Required by controller-gen. -func (in *TCP) DeepCopy() *TCP { - if in == nil { - return nil - } - out := new(TCP) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new TCP. Required by controller-gen. -func (in *TCP) DeepCopyInterface() interface{} { - return in.DeepCopy() -} - -// DeepCopyInto supports using IP within kubernetes types, where deepcopy-gen is used. -func (in *IP) DeepCopyInto(out *IP) { - p := proto.Clone(in).(*IP) - *out = *p -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IP. Required by controller-gen. -func (in *IP) DeepCopy() *IP { - if in == nil { - return nil - } - out := new(IP) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new IP. Required by controller-gen. -func (in *IP) DeepCopyInterface() interface{} { - return in.DeepCopy() -} - -// DeepCopyInto supports using Ethernet within kubernetes types, where deepcopy-gen is used. 
-func (in *Ethernet) DeepCopyInto(out *Ethernet) { - p := proto.Clone(in).(*Ethernet) - *out = *p -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ethernet. Required by controller-gen. -func (in *Ethernet) DeepCopy() *Ethernet { - if in == nil { - return nil - } - out := new(Ethernet) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new Ethernet. Required by controller-gen. -func (in *Ethernet) DeepCopyInterface() interface{} { - return in.DeepCopy() -} - -// DeepCopyInto supports using TCPFlags within kubernetes types, where deepcopy-gen is used. -func (in *TCPFlags) DeepCopyInto(out *TCPFlags) { - p := proto.Clone(in).(*TCPFlags) - *out = *p -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPFlags. Required by controller-gen. -func (in *TCPFlags) DeepCopy() *TCPFlags { - if in == nil { - return nil - } - out := new(TCPFlags) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new TCPFlags. Required by controller-gen. -func (in *TCPFlags) DeepCopyInterface() interface{} { - return in.DeepCopy() -} - -// DeepCopyInto supports using UDP within kubernetes types, where deepcopy-gen is used. -func (in *UDP) DeepCopyInto(out *UDP) { - p := proto.Clone(in).(*UDP) - *out = *p -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UDP. Required by controller-gen. -func (in *UDP) DeepCopy() *UDP { - if in == nil { - return nil - } - out := new(UDP) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new UDP. Required by controller-gen. -func (in *UDP) DeepCopyInterface() interface{} { - return in.DeepCopy() -} - -// DeepCopyInto supports using SCTP within kubernetes types, where deepcopy-gen is used. -func (in *SCTP) DeepCopyInto(out *SCTP) { - p := proto.Clone(in).(*SCTP) - *out = *p -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SCTP. Required by controller-gen. -func (in *SCTP) DeepCopy() *SCTP { - if in == nil { - return nil - } - out := new(SCTP) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new SCTP. Required by controller-gen. -func (in *SCTP) DeepCopyInterface() interface{} { - return in.DeepCopy() -} - -// DeepCopyInto supports using ICMPv4 within kubernetes types, where deepcopy-gen is used. -func (in *ICMPv4) DeepCopyInto(out *ICMPv4) { - p := proto.Clone(in).(*ICMPv4) - *out = *p -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ICMPv4. Required by controller-gen. -func (in *ICMPv4) DeepCopy() *ICMPv4 { - if in == nil { - return nil - } - out := new(ICMPv4) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new ICMPv4. Required by controller-gen. -func (in *ICMPv4) DeepCopyInterface() interface{} { - return in.DeepCopy() -} - -// DeepCopyInto supports using ICMPv6 within kubernetes types, where deepcopy-gen is used. -func (in *ICMPv6) DeepCopyInto(out *ICMPv6) { - p := proto.Clone(in).(*ICMPv6) - *out = *p -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ICMPv6. Required by controller-gen. 
-func (in *ICMPv6) DeepCopy() *ICMPv6 { - if in == nil { - return nil - } - out := new(ICMPv6) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new ICMPv6. Required by controller-gen. -func (in *ICMPv6) DeepCopyInterface() interface{} { - return in.DeepCopy() -} - -// DeepCopyInto supports using Policy within kubernetes types, where deepcopy-gen is used. -func (in *Policy) DeepCopyInto(out *Policy) { - p := proto.Clone(in).(*Policy) - *out = *p -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy. Required by controller-gen. -func (in *Policy) DeepCopy() *Policy { - if in == nil { - return nil - } - out := new(Policy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new Policy. Required by controller-gen. -func (in *Policy) DeepCopyInterface() interface{} { - return in.DeepCopy() -} - -// DeepCopyInto supports using EventTypeFilter within kubernetes types, where deepcopy-gen is used. -func (in *EventTypeFilter) DeepCopyInto(out *EventTypeFilter) { - p := proto.Clone(in).(*EventTypeFilter) - *out = *p -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventTypeFilter. Required by controller-gen. -func (in *EventTypeFilter) DeepCopy() *EventTypeFilter { - if in == nil { - return nil - } - out := new(EventTypeFilter) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new EventTypeFilter. Required by controller-gen. -func (in *EventTypeFilter) DeepCopyInterface() interface{} { - return in.DeepCopy() -} - -// DeepCopyInto supports using CiliumEventType within kubernetes types, where deepcopy-gen is used. -func (in *CiliumEventType) DeepCopyInto(out *CiliumEventType) { - p := proto.Clone(in).(*CiliumEventType) - *out = *p -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumEventType. Required by controller-gen. -func (in *CiliumEventType) DeepCopy() *CiliumEventType { - if in == nil { - return nil - } - out := new(CiliumEventType) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new CiliumEventType. Required by controller-gen. -func (in *CiliumEventType) DeepCopyInterface() interface{} { - return in.DeepCopy() -} - -// DeepCopyInto supports using FlowFilter within kubernetes types, where deepcopy-gen is used. -func (in *FlowFilter) DeepCopyInto(out *FlowFilter) { - p := proto.Clone(in).(*FlowFilter) - *out = *p -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowFilter. Required by controller-gen. -func (in *FlowFilter) DeepCopy() *FlowFilter { - if in == nil { - return nil - } - out := new(FlowFilter) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new FlowFilter. Required by controller-gen. -func (in *FlowFilter) DeepCopyInterface() interface{} { - return in.DeepCopy() -} - -// DeepCopyInto supports using DNS within kubernetes types, where deepcopy-gen is used. -func (in *DNS) DeepCopyInto(out *DNS) { - p := proto.Clone(in).(*DNS) - *out = *p -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNS. Required by controller-gen. 
-func (in *DNS) DeepCopy() *DNS { - if in == nil { - return nil - } - out := new(DNS) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new DNS. Required by controller-gen. -func (in *DNS) DeepCopyInterface() interface{} { - return in.DeepCopy() -} - -// DeepCopyInto supports using HTTPHeader within kubernetes types, where deepcopy-gen is used. -func (in *HTTPHeader) DeepCopyInto(out *HTTPHeader) { - p := proto.Clone(in).(*HTTPHeader) - *out = *p -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHeader. Required by controller-gen. -func (in *HTTPHeader) DeepCopy() *HTTPHeader { - if in == nil { - return nil - } - out := new(HTTPHeader) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHeader. Required by controller-gen. -func (in *HTTPHeader) DeepCopyInterface() interface{} { - return in.DeepCopy() -} - -// DeepCopyInto supports using HTTP within kubernetes types, where deepcopy-gen is used. -func (in *HTTP) DeepCopyInto(out *HTTP) { - p := proto.Clone(in).(*HTTP) - *out = *p -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTP. Required by controller-gen. -func (in *HTTP) DeepCopy() *HTTP { - if in == nil { - return nil - } - out := new(HTTP) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new HTTP. Required by controller-gen. -func (in *HTTP) DeepCopyInterface() interface{} { - return in.DeepCopy() -} - -// DeepCopyInto supports using Kafka within kubernetes types, where deepcopy-gen is used. -func (in *Kafka) DeepCopyInto(out *Kafka) { - p := proto.Clone(in).(*Kafka) - *out = *p -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Kafka. Required by controller-gen. -func (in *Kafka) DeepCopy() *Kafka { - if in == nil { - return nil - } - out := new(Kafka) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new Kafka. Required by controller-gen. -func (in *Kafka) DeepCopyInterface() interface{} { - return in.DeepCopy() -} - -// DeepCopyInto supports using Service within kubernetes types, where deepcopy-gen is used. -func (in *Service) DeepCopyInto(out *Service) { - p := proto.Clone(in).(*Service) - *out = *p -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Service. Required by controller-gen. -func (in *Service) DeepCopy() *Service { - if in == nil { - return nil - } - out := new(Service) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new Service. Required by controller-gen. -func (in *Service) DeepCopyInterface() interface{} { - return in.DeepCopy() -} - -// DeepCopyInto supports using LostEvent within kubernetes types, where deepcopy-gen is used. -func (in *LostEvent) DeepCopyInto(out *LostEvent) { - p := proto.Clone(in).(*LostEvent) - *out = *p -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LostEvent. Required by controller-gen. 
-func (in *LostEvent) DeepCopy() *LostEvent { - if in == nil { - return nil - } - out := new(LostEvent) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new LostEvent. Required by controller-gen. -func (in *LostEvent) DeepCopyInterface() interface{} { - return in.DeepCopy() -} - -// DeepCopyInto supports using AgentEvent within kubernetes types, where deepcopy-gen is used. -func (in *AgentEvent) DeepCopyInto(out *AgentEvent) { - p := proto.Clone(in).(*AgentEvent) - *out = *p -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentEvent. Required by controller-gen. -func (in *AgentEvent) DeepCopy() *AgentEvent { - if in == nil { - return nil - } - out := new(AgentEvent) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new AgentEvent. Required by controller-gen. -func (in *AgentEvent) DeepCopyInterface() interface{} { - return in.DeepCopy() -} - -// DeepCopyInto supports using AgentEventUnknown within kubernetes types, where deepcopy-gen is used. -func (in *AgentEventUnknown) DeepCopyInto(out *AgentEventUnknown) { - p := proto.Clone(in).(*AgentEventUnknown) - *out = *p -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentEventUnknown. Required by controller-gen. -func (in *AgentEventUnknown) DeepCopy() *AgentEventUnknown { - if in == nil { - return nil - } - out := new(AgentEventUnknown) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new AgentEventUnknown. Required by controller-gen. -func (in *AgentEventUnknown) DeepCopyInterface() interface{} { - return in.DeepCopy() -} - -// DeepCopyInto supports using TimeNotification within kubernetes types, where deepcopy-gen is used. -func (in *TimeNotification) DeepCopyInto(out *TimeNotification) { - p := proto.Clone(in).(*TimeNotification) - *out = *p -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeNotification. Required by controller-gen. -func (in *TimeNotification) DeepCopy() *TimeNotification { - if in == nil { - return nil - } - out := new(TimeNotification) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new TimeNotification. Required by controller-gen. -func (in *TimeNotification) DeepCopyInterface() interface{} { - return in.DeepCopy() -} - -// DeepCopyInto supports using PolicyUpdateNotification within kubernetes types, where deepcopy-gen is used. -func (in *PolicyUpdateNotification) DeepCopyInto(out *PolicyUpdateNotification) { - p := proto.Clone(in).(*PolicyUpdateNotification) - *out = *p -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyUpdateNotification. Required by controller-gen. -func (in *PolicyUpdateNotification) DeepCopy() *PolicyUpdateNotification { - if in == nil { - return nil - } - out := new(PolicyUpdateNotification) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new PolicyUpdateNotification. Required by controller-gen. 
-func (in *PolicyUpdateNotification) DeepCopyInterface() interface{} { - return in.DeepCopy() -} - -// DeepCopyInto supports using EndpointRegenNotification within kubernetes types, where deepcopy-gen is used. -func (in *EndpointRegenNotification) DeepCopyInto(out *EndpointRegenNotification) { - p := proto.Clone(in).(*EndpointRegenNotification) - *out = *p -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointRegenNotification. Required by controller-gen. -func (in *EndpointRegenNotification) DeepCopy() *EndpointRegenNotification { - if in == nil { - return nil - } - out := new(EndpointRegenNotification) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new EndpointRegenNotification. Required by controller-gen. -func (in *EndpointRegenNotification) DeepCopyInterface() interface{} { - return in.DeepCopy() -} - -// DeepCopyInto supports using EndpointUpdateNotification within kubernetes types, where deepcopy-gen is used. -func (in *EndpointUpdateNotification) DeepCopyInto(out *EndpointUpdateNotification) { - p := proto.Clone(in).(*EndpointUpdateNotification) - *out = *p -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointUpdateNotification. Required by controller-gen. -func (in *EndpointUpdateNotification) DeepCopy() *EndpointUpdateNotification { - if in == nil { - return nil - } - out := new(EndpointUpdateNotification) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new EndpointUpdateNotification. Required by controller-gen. -func (in *EndpointUpdateNotification) DeepCopyInterface() interface{} { - return in.DeepCopy() -} - -// DeepCopyInto supports using IPCacheNotification within kubernetes types, where deepcopy-gen is used. -func (in *IPCacheNotification) DeepCopyInto(out *IPCacheNotification) { - p := proto.Clone(in).(*IPCacheNotification) - *out = *p -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPCacheNotification. Required by controller-gen. -func (in *IPCacheNotification) DeepCopy() *IPCacheNotification { - if in == nil { - return nil - } - out := new(IPCacheNotification) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new IPCacheNotification. Required by controller-gen. -func (in *IPCacheNotification) DeepCopyInterface() interface{} { - return in.DeepCopy() -} - -// DeepCopyInto supports using ServiceUpsertNotificationAddr within kubernetes types, where deepcopy-gen is used. -func (in *ServiceUpsertNotificationAddr) DeepCopyInto(out *ServiceUpsertNotificationAddr) { - p := proto.Clone(in).(*ServiceUpsertNotificationAddr) - *out = *p -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceUpsertNotificationAddr. Required by controller-gen. -func (in *ServiceUpsertNotificationAddr) DeepCopy() *ServiceUpsertNotificationAddr { - if in == nil { - return nil - } - out := new(ServiceUpsertNotificationAddr) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new ServiceUpsertNotificationAddr. Required by controller-gen. 
-func (in *ServiceUpsertNotificationAddr) DeepCopyInterface() interface{} { - return in.DeepCopy() -} - -// DeepCopyInto supports using ServiceUpsertNotification within kubernetes types, where deepcopy-gen is used. -func (in *ServiceUpsertNotification) DeepCopyInto(out *ServiceUpsertNotification) { - p := proto.Clone(in).(*ServiceUpsertNotification) - *out = *p -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceUpsertNotification. Required by controller-gen. -func (in *ServiceUpsertNotification) DeepCopy() *ServiceUpsertNotification { - if in == nil { - return nil - } - out := new(ServiceUpsertNotification) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new ServiceUpsertNotification. Required by controller-gen. -func (in *ServiceUpsertNotification) DeepCopyInterface() interface{} { - return in.DeepCopy() -} - -// DeepCopyInto supports using ServiceDeleteNotification within kubernetes types, where deepcopy-gen is used. -func (in *ServiceDeleteNotification) DeepCopyInto(out *ServiceDeleteNotification) { - p := proto.Clone(in).(*ServiceDeleteNotification) - *out = *p -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceDeleteNotification. Required by controller-gen. -func (in *ServiceDeleteNotification) DeepCopy() *ServiceDeleteNotification { - if in == nil { - return nil - } - out := new(ServiceDeleteNotification) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new ServiceDeleteNotification. Required by controller-gen. -func (in *ServiceDeleteNotification) DeepCopyInterface() interface{} { - return in.DeepCopy() -} - -// DeepCopyInto supports using NetworkInterface within kubernetes types, where deepcopy-gen is used. -func (in *NetworkInterface) DeepCopyInto(out *NetworkInterface) { - p := proto.Clone(in).(*NetworkInterface) - *out = *p -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterface. Required by controller-gen. -func (in *NetworkInterface) DeepCopy() *NetworkInterface { - if in == nil { - return nil - } - out := new(NetworkInterface) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterface. Required by controller-gen. -func (in *NetworkInterface) DeepCopyInterface() interface{} { - return in.DeepCopy() -} - -// DeepCopyInto supports using DebugEvent within kubernetes types, where deepcopy-gen is used. -func (in *DebugEvent) DeepCopyInto(out *DebugEvent) { - p := proto.Clone(in).(*DebugEvent) - *out = *p -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DebugEvent. Required by controller-gen. -func (in *DebugEvent) DeepCopy() *DebugEvent { - if in == nil { - return nil - } - out := new(DebugEvent) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new DebugEvent. Required by controller-gen. 
-func (in *DebugEvent) DeepCopyInterface() interface{} { - return in.DeepCopy() -} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/bgp_route_policy.go b/vendor/github.com/cilium/cilium/api/v1/models/bgp_route_policy.go new file mode 100644 index 0000000000..d5d484d10c --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/models/bgp_route_policy.go @@ -0,0 +1,177 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// BgpRoutePolicy Single BGP route policy retrieved from the underlying router +// +// swagger:model BgpRoutePolicy +type BgpRoutePolicy struct { + + // Name of the route policy + Name string `json:"name,omitempty"` + + // Autonomous System Number (ASN) identifying a BGP virtual router instance + RouterAsn int64 `json:"router-asn,omitempty"` + + // List of the route policy statements + Statements []*BgpRoutePolicyStatement `json:"statements"` + + // Type of the route policy + // Enum: [export import] + Type string `json:"type,omitempty"` +} + +// Validate validates this bgp route policy +func (m *BgpRoutePolicy) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateStatements(formats); err != nil { + res = append(res, err) + } + + if err := m.validateType(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *BgpRoutePolicy) validateStatements(formats strfmt.Registry) error { + if swag.IsZero(m.Statements) { // not required + return nil + } + + for i := 0; i < len(m.Statements); i++ { + if swag.IsZero(m.Statements[i]) { // not required + continue + } + + if m.Statements[i] != nil { + if err := m.Statements[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("statements" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("statements" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +var bgpRoutePolicyTypeTypePropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["export","import"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + bgpRoutePolicyTypeTypePropEnum = append(bgpRoutePolicyTypeTypePropEnum, v) + } +} + +const ( + + // BgpRoutePolicyTypeExport captures enum value "export" + BgpRoutePolicyTypeExport string = "export" + + // BgpRoutePolicyTypeImport captures enum value "import" + BgpRoutePolicyTypeImport string = "import" +) + +// prop value enum +func (m *BgpRoutePolicy) validateTypeEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, bgpRoutePolicyTypeTypePropEnum, true); err != nil { + return err + } + return nil +} + +func (m *BgpRoutePolicy) validateType(formats strfmt.Registry) error { + if swag.IsZero(m.Type) { // not required + return nil + } + + // value enum + if err := m.validateTypeEnum("type", "body", m.Type); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this bgp route policy based on the context it is used +func (m *BgpRoutePolicy) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateStatements(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *BgpRoutePolicy) contextValidateStatements(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Statements); i++ { + + if m.Statements[i] != nil { + if err := m.Statements[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("statements" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("statements" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *BgpRoutePolicy) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BgpRoutePolicy) UnmarshalBinary(b []byte) error { + var res BgpRoutePolicy + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/bgp_route_policy_prefix_match.go b/vendor/github.com/cilium/cilium/api/v1/models/bgp_route_policy_prefix_match.go new file mode 100644 index 0000000000..993f91dd6a --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/models/bgp_route_policy_prefix_match.go @@ -0,0 +1,59 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// BgpRoutePolicyPrefixMatch Matches a CIDR prefix in a BGP route policy +// +// swagger:model BgpRoutePolicyPrefixMatch +type BgpRoutePolicyPrefixMatch struct { + + // CIDR prefix to match with + Cidr string `json:"cidr,omitempty"` + + // Maximal prefix length that will match if it falls under CIDR + PrefixLenMax int64 `json:"prefix-len-max,omitempty"` + + // Minimal prefix length that will match if it falls under CIDR + PrefixLenMin int64 `json:"prefix-len-min,omitempty"` +} + +// Validate validates this bgp route policy prefix match +func (m *BgpRoutePolicyPrefixMatch) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this bgp route policy prefix match based on context it is used +func (m *BgpRoutePolicyPrefixMatch) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *BgpRoutePolicyPrefixMatch) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BgpRoutePolicyPrefixMatch) UnmarshalBinary(b []byte) error { + var res BgpRoutePolicyPrefixMatch + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/bgp_route_policy_statement.go b/vendor/github.com/cilium/cilium/api/v1/models/bgp_route_policy_statement.go new file mode 100644 index 0000000000..09cdbd47e9 --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/models/bgp_route_policy_statement.go @@ -0,0 +1,186 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// BgpRoutePolicyStatement Single BGP route policy statement +// +// swagger:model BgpRoutePolicyStatement +type BgpRoutePolicyStatement struct { + + // List of BGP standard community values to be added to the matched route + AddCommunities []string `json:"add-communities"` + + // List of BGP large community values to be added to the matched route + AddLargeCommunities []string `json:"add-large-communities"` + + // Matches any of the provided BGP neighbor IP addresses. If empty matches all neighbors. + MatchNeighbors []string `json:"match-neighbors"` + + // Matches any of the provided prefixes. If empty matches all prefixes. 
+ MatchPrefixes []*BgpRoutePolicyPrefixMatch `json:"match-prefixes"` + + // RIB processing action taken on the matched route + // Enum: [none accept reject] + RouteAction string `json:"route-action,omitempty"` + + // BGP local preference value to be set on the matched route + SetLocalPreference int64 `json:"set-local-preference,omitempty"` +} + +// Validate validates this bgp route policy statement +func (m *BgpRoutePolicyStatement) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateMatchPrefixes(formats); err != nil { + res = append(res, err) + } + + if err := m.validateRouteAction(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *BgpRoutePolicyStatement) validateMatchPrefixes(formats strfmt.Registry) error { + if swag.IsZero(m.MatchPrefixes) { // not required + return nil + } + + for i := 0; i < len(m.MatchPrefixes); i++ { + if swag.IsZero(m.MatchPrefixes[i]) { // not required + continue + } + + if m.MatchPrefixes[i] != nil { + if err := m.MatchPrefixes[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("match-prefixes" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("match-prefixes" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +var bgpRoutePolicyStatementTypeRouteActionPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["none","accept","reject"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + bgpRoutePolicyStatementTypeRouteActionPropEnum = append(bgpRoutePolicyStatementTypeRouteActionPropEnum, v) + } +} + +const ( + + // BgpRoutePolicyStatementRouteActionNone captures enum value "none" + BgpRoutePolicyStatementRouteActionNone string = "none" + + // BgpRoutePolicyStatementRouteActionAccept captures enum value "accept" + BgpRoutePolicyStatementRouteActionAccept string = "accept" + + // BgpRoutePolicyStatementRouteActionReject captures enum value "reject" + BgpRoutePolicyStatementRouteActionReject string = "reject" +) + +// prop value enum +func (m *BgpRoutePolicyStatement) validateRouteActionEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, bgpRoutePolicyStatementTypeRouteActionPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *BgpRoutePolicyStatement) validateRouteAction(formats strfmt.Registry) error { + if swag.IsZero(m.RouteAction) { // not required + return nil + } + + // value enum + if err := m.validateRouteActionEnum("route-action", "body", m.RouteAction); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this bgp route policy statement based on the context it is used +func (m *BgpRoutePolicyStatement) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateMatchPrefixes(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *BgpRoutePolicyStatement) contextValidateMatchPrefixes(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.MatchPrefixes); i++ { + + if m.MatchPrefixes[i] != nil { + if err := m.MatchPrefixes[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("match-prefixes" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("match-prefixes" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *BgpRoutePolicyStatement) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BgpRoutePolicyStatement) UnmarshalBinary(b []byte) error { + var res BgpRoutePolicyStatement + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/kube_proxy_replacement.go b/vendor/github.com/cilium/cilium/api/v1/models/kube_proxy_replacement.go index d47308d560..c3e9561a55 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/kube_proxy_replacement.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/kube_proxy_replacement.go @@ -1134,13 +1134,17 @@ func (m *KubeProxyReplacementFeaturesNat46X64Service) UnmarshalBinary(b []byte) type KubeProxyReplacementFeaturesNodePort struct { // acceleration - // Enum: [None Native Generic] + // Enum: [None Native Generic Best-Effort] Acceleration string `json:"acceleration,omitempty"` // algorithm // Enum: [Random Maglev] Algorithm string `json:"algorithm,omitempty"` + // dsr mode + // Enum: [IP Option/Extension IPIP Geneve] + DsrMode string `json:"dsrMode,omitempty"` + // enabled Enabled bool `json:"enabled,omitempty"` @@ -1170,6 +1174,10 @@ func (m *KubeProxyReplacementFeaturesNodePort) Validate(formats strfmt.Registry) res = append(res, err) } + if err := m.validateDsrMode(formats); err != nil { + res = append(res, err) + } + if err := m.validateMode(formats); err != nil { res = append(res, err) } @@ -1184,7 +1192,7 @@ var kubeProxyReplacementFeaturesNodePortTypeAccelerationPropEnum []interface{} func init() { var res []string - if err := json.Unmarshal([]byte(`["None","Native","Generic"]`), &res); err != nil { + if err := json.Unmarshal([]byte(`["None","Native","Generic","Best-Effort"]`), &res); err != nil { panic(err) } for _, v := range res { @@ -1202,6 +1210,9 @@ const ( // KubeProxyReplacementFeaturesNodePortAccelerationGeneric captures enum value "Generic" KubeProxyReplacementFeaturesNodePortAccelerationGeneric string = "Generic" + + // KubeProxyReplacementFeaturesNodePortAccelerationBestDashEffort captures enum value "Best-Effort" + KubeProxyReplacementFeaturesNodePortAccelerationBestDashEffort string = "Best-Effort" ) // prop value enum @@ -1267,6 +1278,51 @@ func (m *KubeProxyReplacementFeaturesNodePort) validateAlgorithm(formats strfmt. 
return nil } +var kubeProxyReplacementFeaturesNodePortTypeDsrModePropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["IP Option/Extension","IPIP","Geneve"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + kubeProxyReplacementFeaturesNodePortTypeDsrModePropEnum = append(kubeProxyReplacementFeaturesNodePortTypeDsrModePropEnum, v) + } +} + +const ( + + // KubeProxyReplacementFeaturesNodePortDsrModeIPOptionExtension captures enum value "IP Option/Extension" + KubeProxyReplacementFeaturesNodePortDsrModeIPOptionExtension string = "IP Option/Extension" + + // KubeProxyReplacementFeaturesNodePortDsrModeIPIP captures enum value "IPIP" + KubeProxyReplacementFeaturesNodePortDsrModeIPIP string = "IPIP" + + // KubeProxyReplacementFeaturesNodePortDsrModeGeneve captures enum value "Geneve" + KubeProxyReplacementFeaturesNodePortDsrModeGeneve string = "Geneve" +) + +// prop value enum +func (m *KubeProxyReplacementFeaturesNodePort) validateDsrModeEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, kubeProxyReplacementFeaturesNodePortTypeDsrModePropEnum, true); err != nil { + return err + } + return nil +} + +func (m *KubeProxyReplacementFeaturesNodePort) validateDsrMode(formats strfmt.Registry) error { + if swag.IsZero(m.DsrMode) { // not required + return nil + } + + // value enum + if err := m.validateDsrModeEnum("features"+"."+"nodePort"+"."+"dsrMode", "body", m.DsrMode); err != nil { + return err + } + + return nil +} + var kubeProxyReplacementFeaturesNodePortTypeModePropEnum []interface{} func init() { diff --git a/vendor/github.com/cilium/cilium/api/v1/models/label.go b/vendor/github.com/cilium/cilium/api/v1/models/label.go new file mode 100644 index 0000000000..7e4225fee1 --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/models/label.go @@ -0,0 +1,59 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// Label Label is the Cilium's representation of a container label +// +// swagger:model Label +type Label struct { + + // key + Key string `json:"key,omitempty"` + + // Source can be one of the above values (e.g. 
LabelSourceContainer) + Source string `json:"source,omitempty"` + + // value + Value string `json:"value,omitempty"` +} + +// Validate validates this label +func (m *Label) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this label based on context it is used +func (m *Label) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *Label) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Label) UnmarshalBinary(b []byte) error { + var res Label + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/label_array.go b/vendor/github.com/cilium/cilium/api/v1/models/label_array.go new file mode 100644 index 0000000000..ca052c05f0 --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/models/label_array.go @@ -0,0 +1,76 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// LabelArray LabelArray is an array of labels forming a set +// +// swagger:model LabelArray +type LabelArray []*Label + +// Validate validates this label array +func (m LabelArray) Validate(formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + if swag.IsZero(m[i]) { // not required + continue + } + + if m[i] != nil { + if err := m[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// ContextValidate validate this label array based on the context it is used +func (m LabelArray) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + + if m[i] != nil { + if err := m[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/node_element.go b/vendor/github.com/cilium/cilium/api/v1/models/node_element.go index a3d06e6605..7f0ce2c6be 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/node_element.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/node_element.go @@ -40,6 +40,9 @@ type NodeElement struct { // Alternative addresses assigned to the node SecondaryAddresses []*NodeAddressingElement `json:"secondary-addresses"` + + // Source of the node configuration + Source string `json:"source,omitempty"` } // Validate validates this node element diff --git a/vendor/github.com/cilium/cilium/api/v1/models/selector_identity_mapping.go b/vendor/github.com/cilium/cilium/api/v1/models/selector_identity_mapping.go index 0a62efa4b1..80aa38532d 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/selector_identity_mapping.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/selector_identity_mapping.go @@ -11,6 +11,7 @@ package models import ( "context" + "github.com/go-openapi/errors" "github.com/go-openapi/strfmt" "github.com/go-openapi/swag" ) @@ -24,7 +25,7 @@ type SelectorIdentityMapping struct { Identities []int64 `json:"identities"` // Labels are the metadata labels associated with the selector - Labels interface{} `json:"labels,omitempty"` + Labels LabelArray `json:"labels,omitempty"` // string form of selector Selector string `json:"selector,omitempty"` @@ -35,11 +36,60 @@ type SelectorIdentityMapping struct { // Validate validates this selector identity mapping func (m *SelectorIdentityMapping) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateLabels(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *SelectorIdentityMapping) validateLabels(formats strfmt.Registry) error { + if swag.IsZero(m.Labels) { // not required + return nil + } + + if err := m.Labels.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("labels") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("labels") + } + return err + } + return nil } -// ContextValidate validates this selector identity mapping based on context it is used +// ContextValidate validate this selector identity mapping based on the context it is used func (m *SelectorIdentityMapping) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateLabels(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *SelectorIdentityMapping) contextValidateLabels(ctx context.Context, formats strfmt.Registry) error { + + if err := m.Labels.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("labels") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("labels") + } + return err + } + return nil } diff --git a/vendor/github.com/cilium/cilium/api/v1/models/srv6.go b/vendor/github.com/cilium/cilium/api/v1/models/srv6.go new file mode 100644 index 0000000000..0e6531aedc --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/models/srv6.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+ +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Srv6 Status of the SRv6 +// +// +k8s:deepcopy-gen=true +// +// swagger:model Srv6 +type Srv6 struct { + + // enabled + Enabled bool `json:"enabled,omitempty"` + + // srv6 encap mode + // Enum: [SRH Reduced] + Srv6EncapMode string `json:"srv6EncapMode,omitempty"` +} + +// Validate validates this srv6 +func (m *Srv6) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateSrv6EncapMode(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +var srv6TypeSrv6EncapModePropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["SRH","Reduced"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + srv6TypeSrv6EncapModePropEnum = append(srv6TypeSrv6EncapModePropEnum, v) + } +} + +const ( + + // Srv6Srv6EncapModeSRH captures enum value "SRH" + Srv6Srv6EncapModeSRH string = "SRH" + + // Srv6Srv6EncapModeReduced captures enum value "Reduced" + Srv6Srv6EncapModeReduced string = "Reduced" +) + +// prop value enum +func (m *Srv6) validateSrv6EncapModeEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, srv6TypeSrv6EncapModePropEnum, true); err != nil { + return err + } + return nil +} + +func (m *Srv6) validateSrv6EncapMode(formats strfmt.Registry) error { + if swag.IsZero(m.Srv6EncapMode) { // not required + return nil + } + + // value enum + if err := m.validateSrv6EncapModeEnum("srv6EncapMode", "body", m.Srv6EncapMode); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this srv6 based on context it is used +func (m *Srv6) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *Srv6) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Srv6) UnmarshalBinary(b []byte) error { + var res Srv6 + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/state_d_b_query.go b/vendor/github.com/cilium/cilium/api/v1/models/state_d_b_query.go new file mode 100644 index 0000000000..df5ca5a801 --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/models/state_d_b_query.go @@ -0,0 +1,62 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StateDBQuery StateDB query +// +// swagger:model StateDBQuery +type StateDBQuery struct { + + // Index to query against + Index string `json:"index,omitempty"` + + // Key to query with. Base64 encoded. 
+ Key string `json:"key,omitempty"` + + // LowerBound prefix search or full-matching Get + Lowerbound bool `json:"lowerbound,omitempty"` + + // Name of the table to query + Table string `json:"table,omitempty"` +} + +// Validate validates this state d b query +func (m *StateDBQuery) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this state d b query based on context it is used +func (m *StateDBQuery) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *StateDBQuery) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StateDBQuery) UnmarshalBinary(b []byte) error { + var res StateDBQuery + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/status_response.go b/vendor/github.com/cilium/cilium/api/v1/models/status_response.go index d661530ebb..52b18d92f3 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/status_response.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/status_response.go @@ -105,6 +105,9 @@ type StatusResponse struct { // Status of proxy Proxy *ProxyStatus `json:"proxy,omitempty"` + // Status of SRv6 + Srv6 *Srv6 `json:"srv6,omitempty"` + // List of stale information in the status Stale map[string]strfmt.DateTime `json:"stale,omitempty"` } @@ -213,6 +216,10 @@ func (m *StatusResponse) Validate(formats strfmt.Registry) error { res = append(res, err) } + if err := m.validateSrv6(formats); err != nil { + res = append(res, err) + } + if err := m.validateStale(formats); err != nil { res = append(res, err) } @@ -696,6 +703,25 @@ func (m *StatusResponse) validateProxy(formats strfmt.Registry) error { return nil } +func (m *StatusResponse) validateSrv6(formats strfmt.Registry) error { + if swag.IsZero(m.Srv6) { // not required + return nil + } + + if m.Srv6 != nil { + if err := m.Srv6.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("srv6") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("srv6") + } + return err + } + } + + return nil +} + func (m *StatusResponse) validateStale(formats strfmt.Registry) error { if swag.IsZero(m.Stale) { // not required return nil @@ -816,6 +842,10 @@ func (m *StatusResponse) ContextValidate(ctx context.Context, formats strfmt.Reg res = append(res, err) } + if err := m.contextValidateSrv6(ctx, formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { return errors.CompositeValidationError(res...) 
} @@ -1220,6 +1250,22 @@ func (m *StatusResponse) contextValidateProxy(ctx context.Context, formats strfm return nil } +func (m *StatusResponse) contextValidateSrv6(ctx context.Context, formats strfmt.Registry) error { + + if m.Srv6 != nil { + if err := m.Srv6.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("srv6") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("srv6") + } + return err + } + } + + return nil +} + // MarshalBinary interface implementation func (m *StatusResponse) MarshalBinary() ([]byte, error) { if m == nil { diff --git a/vendor/github.com/cilium/cilium/api/v1/models/zz_generated.deepcopy.go b/vendor/github.com/cilium/cilium/api/v1/models/zz_generated.deepcopy.go index 03d13fde3f..184b167601 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/zz_generated.deepcopy.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/zz_generated.deepcopy.go @@ -1214,6 +1214,22 @@ func (in *RequestResponseStatistics) DeepCopy() *RequestResponseStatistics { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Srv6) DeepCopyInto(out *Srv6) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Srv6. +func (in *Srv6) DeepCopy() *Srv6 { + if in == nil { + return nil + } + out := new(Srv6) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StatusResponse) DeepCopyInto(out *StatusResponse) { *out = *in @@ -1348,6 +1364,11 @@ func (in *StatusResponse) DeepCopyInto(out *StatusResponse) { *out = new(ProxyStatus) (*in).DeepCopyInto(*out) } + if in.Srv6 != nil { + in, out := &in.Srv6, &out.Srv6 + *out = new(Srv6) + **out = **in + } if in.Stale != nil { in, out := &in.Stale, &out.Stale *out = make(map[string]strfmt.DateTime, len(*in)) diff --git a/vendor/github.com/cilium/cilium/api/v1/observer/observer.pb.go b/vendor/github.com/cilium/cilium/api/v1/observer/observer.pb.go index 32597d0593..c371e91bad 100644 --- a/vendor/github.com/cilium/cilium/api/v1/observer/observer.pb.go +++ b/vendor/github.com/cilium/cilium/api/v1/observer/observer.pb.go @@ -172,6 +172,7 @@ const DropReason_NO_EGRESS_GATEWAY = flow.DropReason_NO_EGRESS_GATEWAY const DropReason_UNENCRYPTED_TRAFFIC = flow.DropReason_UNENCRYPTED_TRAFFIC const DropReason_TTL_EXCEEDED = flow.DropReason_TTL_EXCEEDED const DropReason_NO_NODE_ID = flow.DropReason_NO_NODE_ID +const DropReason_DROP_RATE_LIMITED = flow.DropReason_DROP_RATE_LIMITED var DropReason_name = flow.DropReason_name var DropReason_value = flow.DropReason_value diff --git a/vendor/github.com/cilium/cilium/pkg/client/client.go b/vendor/github.com/cilium/cilium/pkg/client/client.go index a1af5a888c..f0f26333d7 100644 --- a/vendor/github.com/cilium/cilium/pkg/client/client.go +++ b/vendor/github.com/cilium/cilium/pkg/client/client.go @@ -107,14 +107,42 @@ func NewDefaultClientWithTimeout(timeout time.Duration) (*Client, error) { // If host is nil then use SockPath provided by CILIUM_SOCK // or the cilium default SockPath func NewClient(host string) (*Client, error) { - clientTrans, err := NewRuntime(host) + clientTrans, err := NewRuntime(WithHost(host)) return &Client{*clientapi.New(clientTrans, strfmt.Default)}, err } -func NewRuntime(host string) (*runtime_client.Runtime, error) { +type runtimeOptions struct { + host 
string + basePath string +} + +func WithHost(host string) func(options *runtimeOptions) { + return func(options *runtimeOptions) { + options.host = host + } +} + +func WithBasePath(basePath string) func(options *runtimeOptions) { + return func(options *runtimeOptions) { + options.basePath = basePath + } +} + +func NewRuntime(opts ...func(options *runtimeOptions)) (*runtime_client.Runtime, error) { + r := runtimeOptions{} + for _, opt := range opts { + opt(&r) + } + + host := r.host if host == "" { host = DefaultSockPath() } + basePath := r.basePath + if basePath == "" { + basePath = clientapi.DefaultBasePath + } + tmp := strings.SplitN(host, "://", 2) if len(tmp) != 2 { return nil, fmt.Errorf("invalid host format '%s'", host) @@ -138,7 +166,7 @@ func NewRuntime(host string) (*runtime_client.Runtime, error) { transport := configureTransport(nil, tmp[0], host) httpClient := &http.Client{Transport: transport} - clientTrans := runtime_client.NewWithClient(hostHeader, clientapi.DefaultBasePath, + clientTrans := runtime_client.NewWithClient(hostHeader, basePath, clientapi.DefaultSchemes, httpClient) return clientTrans, nil } @@ -322,6 +350,20 @@ func FormatStatusResponse(w io.Writer, sr *models.StatusResponse, sd StatusDetai fmt.Fprintf(w, "\n") } + if sr.Srv6 != nil { + var fields []string + + status := "Disabled" + fields = append(fields, status) + + if sr.Srv6.Enabled { + fields[0] = "Enabled" + fields = append(fields, fmt.Sprintf("[encap-mode: %s]", sr.Srv6.Srv6EncapMode)) + } + + fmt.Fprintf(w, "SRv6:\t%s\n", strings.Join(fields, "\t")) + } + if sr.CniChaining != nil { fmt.Fprintf(w, "CNI Chaining:\t%s\n", sr.CniChaining.Mode) } @@ -606,7 +648,7 @@ func FormatStatusResponse(w io.Writer, sr *models.StatusResponse, sd StatusDetai } if sd.KubeProxyReplacementDetails && sr.Kubernetes != nil && sr.KubeProxyReplacement != nil { - var selection, mode, xdp string + var selection, mode, dsrMode, xdp string lb := "Disabled" cIP := "Enabled" @@ -618,6 +660,10 @@ func FormatStatusResponse(w io.Writer, sr *models.StatusResponse, sd StatusDetai } xdp = np.Acceleration mode = np.Mode + if mode == models.KubeProxyReplacementFeaturesNodePortModeDSR || + mode == models.KubeProxyReplacementFeaturesNodePortModeHybrid { + dsrMode = np.DsrMode + } nPort = fmt.Sprintf("Enabled (Range: %d-%d)", np.PortMin, np.PortMax) lb = "Enabled" } @@ -684,6 +730,9 @@ func FormatStatusResponse(w io.Writer, sr *models.StatusResponse, sd StatusDetai if mode != "" { fmt.Fprintf(tab, " Mode:\t%s\n", mode) } + if dsrMode != "" { + fmt.Fprintf(tab, " DSR Dispatch Mode:\t%s\n", dsrMode) + } if selection != "" { fmt.Fprintf(tab, " Backend Selection:\t%s\n", selection) } diff --git a/vendor/github.com/cilium/cilium/pkg/clustermesh/types/option.go b/vendor/github.com/cilium/cilium/pkg/clustermesh/types/option.go index 2bc8c9af21..0ec16ac989 100644 --- a/vendor/github.com/cilium/cilium/pkg/clustermesh/types/option.go +++ b/vendor/github.com/cilium/cilium/pkg/clustermesh/types/option.go @@ -4,6 +4,7 @@ package types import ( + "errors" "fmt" "github.com/spf13/pflag" @@ -17,24 +18,30 @@ const ( // OptClusterID is the name of the OptClusterID option OptClusterID = "cluster-id" + + // OptMaxConnectedClusters is the name of the OptMaxConnectedClusters option + OptMaxConnectedClusters = "max-connected-clusters" ) // ClusterInfo groups together the ClusterID and the ClusterName type ClusterInfo struct { - ID uint32 `mapstructure:"cluster-id"` - Name string `mapstructure:"cluster-name"` + ID uint32 `mapstructure:"cluster-id"` + Name string 
`mapstructure:"cluster-name"` + MaxConnectedClusters uint32 `mapstructure:"max-connected-clusters"` } // DefaultClusterInfo represents the default ClusterInfo values. var DefaultClusterInfo = ClusterInfo{ - ID: 0, - Name: defaults.ClusterName, + ID: 0, + Name: defaults.ClusterName, + MaxConnectedClusters: defaults.MaxConnectedClusters, } // Flags implements the cell.Flagger interface, to register the given flags. func (def ClusterInfo) Flags(flags *pflag.FlagSet) { flags.Uint32(OptClusterID, def.ID, "Unique identifier of the cluster") flags.String(OptClusterName, def.Name, "Name of the cluster") + flags.Uint32(OptMaxConnectedClusters, def.MaxConnectedClusters, "Maximum number of clusters to be connected in a clustermesh. Increasing this value will reduce the maximum number of identities available. Valid configurations are [255, 511].") } // Validate validates that the ClusterID is in the valid range (including ClusterID == 0), @@ -66,3 +73,33 @@ func (c ClusterInfo) validateName() error { return nil } + +// ExtendedClusterMeshEnabled returns true if MaxConnectedClusters value has +// been set to a value larger than the default 255. +func (c ClusterInfo) ExtendedClusterMeshEnabled() bool { + return c.MaxConnectedClusters != defaults.MaxConnectedClusters +} + +// ValidateRemoteConfig validates the remote CiliumClusterConfig to ensure +// compatibility with this cluster's configuration. When configRequired is +// false, a missing configuration or one with ID=0 is allowed for backward +// compatibility, otherwise it is flagged as an error. +func (c ClusterInfo) ValidateRemoteConfig(configRequired bool, config *CiliumClusterConfig) error { + if config == nil || config.ID == 0 { + if configRequired || c.ExtendedClusterMeshEnabled() { + return errors.New("remote cluster is missing cluster configuration") + } + + return nil + } + + if err := ValidateClusterID(config.ID); err != nil { + return err + } + + if c.ExtendedClusterMeshEnabled() && (c.MaxConnectedClusters != config.Capabilities.MaxConnectedClusters) { + return fmt.Errorf("mismatched MaxConnectedClusters; local=%d, remote=%d", c.MaxConnectedClusters, config.Capabilities.MaxConnectedClusters) + } + + return nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/clustermesh/types/types.go b/vendor/github.com/cilium/cilium/pkg/clustermesh/types/types.go index 783ec77800..8137467fe4 100644 --- a/vendor/github.com/cilium/cilium/pkg/clustermesh/types/types.go +++ b/vendor/github.com/cilium/cilium/pkg/clustermesh/types/types.go @@ -4,18 +4,33 @@ package types import ( - "errors" "fmt" + + "github.com/cilium/cilium/pkg/defaults" ) const ( // ClusterIDMin is the minimum value of the cluster ID - ClusterIDMin = 0 - - // ClusterIDMax is the maximum value of the cluster ID - ClusterIDMax = 255 + ClusterIDMin = 0 + ClusterIDExt511 = 511 ) +// ClusterIDMax is the maximum value of the cluster ID +var ClusterIDMax uint32 = defaults.MaxConnectedClusters + +// InitClusterIDMax validates and sets the ClusterIDMax package level variable. +func (c ClusterInfo) InitClusterIDMax() error { + switch c.MaxConnectedClusters { + case defaults.MaxConnectedClusters, ClusterIDExt511: + ClusterIDMax = c.MaxConnectedClusters + default: + return fmt.Errorf("--%s=%d is invalid; supported values are [%d, %d]", OptMaxConnectedClusters, c.MaxConnectedClusters, defaults.MaxConnectedClusters, ClusterIDExt511) + } + return nil +} + +// ValidateClusterID ensures that the given clusterID is within the configured +// range of the ClusterMesh. 
func ValidateClusterID(clusterID uint32) error { if clusterID == ClusterIDMin { return fmt.Errorf("ClusterID %d is reserved", ClusterIDMin) @@ -42,6 +57,9 @@ type CiliumClusterConfigCapabilities struct { // kvstore (for instance, by kvstoremesh). This implies that keys are stored // under the dedicated "cilium/cache" prefix, and all are cluster-scoped. Cached bool `json:"cached,omitempty"` + + // The maximum number of clusters the given cluster can support in a ClusterMesh. + MaxConnectedClusters uint32 `json:"maxConnectedClusters,omitempty"` } // ValidationMode defines if a missing CiliumClusterConfig should be allowed for @@ -52,22 +70,3 @@ const ( BackwardCompatible ValidationMode = false Strict ValidationMode = true ) - -// Validate validates the configuration correctness. When the validation mode -// is BackwardCompatible, a missing configuration or with ID=0 is allowed for -// backward compatibility, otherwise it is flagged as an error. -func (c *CiliumClusterConfig) Validate(mode ValidationMode) error { - if c == nil || c.ID == 0 { - if mode == Strict { - return errors.New("remote cluster is missing cluster configuration") - } - - return nil - } - - if err := ValidateClusterID(c.ID); err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes.go b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes.go index 1a8dbf0cc1..0bc687aa97 100644 --- a/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes.go +++ b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes.go @@ -9,6 +9,7 @@ import ( "errors" "fmt" "io" + "net" "os" "path/filepath" "strings" @@ -18,7 +19,8 @@ import ( "github.com/cilium/ebpf" "github.com/cilium/ebpf/asm" "github.com/cilium/ebpf/features" - "github.com/cilium/ebpf/rlimit" + "github.com/google/gopacket" + "github.com/google/gopacket/layers" "golang.org/x/sys/unix" "github.com/cilium/cilium/pkg/command/exec" @@ -431,10 +433,6 @@ func HaveOuterSourceIPSupport() (err error) { } }() - if err := rlimit.RemoveMemlock(); err != nil { - return err - } - progSpec := &ebpf.ProgramSpec{ Name: "set_tunnel_key_probe", Type: ebpf.SchedACT, @@ -484,6 +482,74 @@ func HaveOuterSourceIPSupport() (err error) { return nil } +// HaveSKBAdjustRoomL2RoomMACSupport tests whether the kernel supports the `bpf_skb_adjust_room` helper +// with the `BPF_ADJ_ROOM_MAC` mode. To do so, we create a program that requests the passed in SKB +// to be expanded by 20 bytes. The helper checks the `mode` argument and will return -ENOSUPP if +// the mode is unknown. Otherwise it should resize the SKB by 20 bytes and return 0. +func HaveSKBAdjustRoomL2RoomMACSupport() (err error) { + defer func() { + if err != nil && !errors.Is(err, ebpf.ErrNotSupported) { + log.WithError(err).Fatal("failed to probe for bpf_skb_adjust_room L2 room MAC support") + } + }() + + progSpec := &ebpf.ProgramSpec{ + Name: "adjust_mac_room", + Type: ebpf.SchedCLS, + License: "GPL", + } + progSpec.Instructions = asm.Instructions{ + asm.Mov.Imm(asm.R2, 20), // len_diff + asm.Mov.Imm(asm.R3, 1), // mode: BPF_ADJ_ROOM_MAC + asm.Mov.Imm(asm.R4, 0), // flags: 0 + asm.FnSkbAdjustRoom.Call(), + asm.Return(), + } + prog, err := ebpf.NewProgram(progSpec) + if err != nil { + return err + } + defer prog.Close() + + // This is a Eth + IPv4 + UDP + data packet. The helper relies on a valid packet being passed in + // since it wants to know offsets of the different layers. 
+ buf := gopacket.NewSerializeBuffer() + err = gopacket.SerializeLayers(buf, gopacket.SerializeOptions{}, + &layers.Ethernet{ + DstMAC: net.HardwareAddr{0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + SrcMAC: net.HardwareAddr{0x0e, 0xf5, 0x16, 0x3d, 0x6b, 0xab}, + EthernetType: layers.EthernetTypeIPv4, + }, + &layers.IPv4{ + Version: 4, + IHL: 5, + Length: 49, + Id: 0xCECB, + TTL: 64, + Protocol: layers.IPProtocolUDP, + SrcIP: net.IPv4(0xc0, 0xa8, 0xb2, 0x56), + DstIP: net.IPv4(0xc0, 0xa8, 0xb2, 0xff), + }, + &layers.UDP{ + SrcPort: 23939, + DstPort: 32412, + }, + gopacket.Payload("M-SEARCH * HTTP/1.1\x0d\x0a"), + ) + if err != nil { + return fmt.Errorf("craft packet: %w", err) + } + + ret, _, err := prog.Test(buf.Bytes()) + if err != nil { + return err + } + if ret != 0 { + return ebpf.ErrNotSupported + } + return nil +} + // HaveIPv6Support tests whether kernel can open an IPv6 socket. This will // also implicitly auto-load IPv6 kernel module if available and not yet // loaded. @@ -559,11 +625,11 @@ func ExecuteHeaderProbes() *FeatureProbes { // skb related probes {ebpf.SchedCLS, asm.FnSkbChangeTail}, - {ebpf.SchedCLS, asm.FnFibLookup}, {ebpf.SchedCLS, asm.FnCsumLevel}, // xdp related probes - {ebpf.XDP, asm.FnFibLookup}, + {ebpf.XDP, asm.FnXdpLoadBytes}, + {ebpf.XDP, asm.FnXdpStoreBytes}, } for _, ph := range progHelpers { probes.ProgramHelpers[ph] = (HaveProgramHelper(ph.Program, ph.Helper) == nil) @@ -601,7 +667,6 @@ func writeCommonHeader(writer io.Writer, probes *FeatureProbes) error { func writeSkbHeader(writer io.Writer, probes *FeatureProbes) error { featuresSkb := map[string]bool{ "HAVE_CHANGE_TAIL": probes.ProgramHelpers[ProgramHelper{ebpf.SchedCLS, asm.FnSkbChangeTail}], - "HAVE_FIB_LOOKUP": probes.ProgramHelpers[ProgramHelper{ebpf.SchedCLS, asm.FnFibLookup}], "HAVE_CSUM_LEVEL": probes.ProgramHelpers[ProgramHelper{ebpf.SchedCLS, asm.FnCsumLevel}], } @@ -611,7 +676,8 @@ func writeSkbHeader(writer io.Writer, probes *FeatureProbes) error { // writeXdpHeader defines macros for bpf/include/bpf/features_xdp.h func writeXdpHeader(writer io.Writer, probes *FeatureProbes) error { featuresXdp := map[string]bool{ - "HAVE_FIB_LOOKUP": probes.ProgramHelpers[ProgramHelper{ebpf.XDP, asm.FnFibLookup}], + "HAVE_XDP_LOAD_BYTES": probes.ProgramHelpers[ProgramHelper{ebpf.XDP, asm.FnXdpLoadBytes}], + "HAVE_XDP_STORE_BYTES": probes.ProgramHelpers[ProgramHelper{ebpf.XDP, asm.FnXdpStoreBytes}], } return writeFeatureHeader(writer, featuresXdp, false) diff --git a/vendor/github.com/cilium/cilium/pkg/defaults/defaults.go b/vendor/github.com/cilium/cilium/pkg/defaults/defaults.go index 526b2c510d..113580c077 100644 --- a/vendor/github.com/cilium/cilium/pkg/defaults/defaults.go +++ b/vendor/github.com/cilium/cilium/pkg/defaults/defaults.go @@ -92,6 +92,9 @@ const ( // HubbleRedactHttpURLQuery controls if the URL query will be redacted from flows HubbleRedactHttpURLQuery = false + // HubbleRedactHttpUserInfo controls if the user info will be redacted from flows + HubbleRedactHttpUserInfo = true + // HubbleRedactKafkaApiKey controls if the Kafka API key will be redacted from flows HubbleRedactKafkaApiKey = false @@ -335,11 +338,7 @@ const ( // connection tracking garbage collection ConntrackGCStartingInterval = 5 * time.Minute - // K8sEventHandover enables use of the kvstore to optimize Kubernetes - // event handling by listening for k8s events in the operator and - // mirroring it into the kvstore for reduced overhead in large - // clusters. 
- K8sEventHandover = false + LegacyTurnOffK8sEventHandover = false // LoopbackIPv4 is the default address for service loopback LoopbackIPv4 = "169.254.42.1" @@ -384,9 +383,8 @@ const ( // CiliumNode.Spec.IPAM.PreAllocate if no value is set IPAMPreAllocation = 8 - // IPAMMultiPoolPreAllocation is the default value for multi-pool IPAM - // pre-allocations - IPAMMultiPoolPreAllocation = "default=8" + // IPAMDefaultIPPool is the default value for the multi-pool default pool name. + IPAMDefaultIPPool = "default" // ENIFirstInterfaceIndex is the default value for // CiliumNode.Spec.ENI.FirstInterfaceIndex if no value is set. @@ -525,13 +523,16 @@ const ( // TunnelProtocol is the default tunneling protocol TunnelProtocol = "vxlan" + // ServiceNoBackendResponse is the default response for services without backends + ServiceNoBackendResponse = "reject" + // Use the CiliumInternalIPs (vs. NodeInternalIPs) for IPsec encapsulation. UseCiliumInternalIPForIPsec = false // TunnelPortVXLAN is the default VXLAN port - TunnelPortVXLAN = 8472 + TunnelPortVXLAN uint16 = 8472 // TunnelPortGeneve is the default Geneve port - TunnelPortGeneve = 6081 + TunnelPortGeneve uint16 = 6081 // ARPBaseReachableTime resembles the kernel's NEIGH_VAR_BASE_REACHABLE_TIME which defaults to 30 seconds. ARPBaseReachableTime = 30 * time.Second @@ -545,6 +546,13 @@ const ( // EnableK8sNetworkPolicy enables support for K8s NetworkPolicy. EnableK8sNetworkPolicy = true + + // MaxConnectedClusters sets the maximum number of clusters that can be + // connected in a clustermesh. + // The value is used to determine the bit allocation for cluster ID and + // identity in a numeric identity. Values > 255 will decrease the number of + // allocatable identities. + MaxConnectedClusters = 255 ) var ( diff --git a/vendor/github.com/cilium/cilium/pkg/health/client/modules.go b/vendor/github.com/cilium/cilium/pkg/health/client/modules.go index 73ffc93132..52d6503791 100644 --- a/vendor/github.com/cilium/cilium/pkg/health/client/modules.go +++ b/vendor/github.com/cilium/cilium/pkg/health/client/modules.go @@ -7,11 +7,22 @@ import ( "encoding/json" "fmt" "io" + "sort" + "strings" + "time" + + "k8s.io/apimachinery/pkg/util/duration" "github.com/cilium/cilium/api/v1/client/daemon" "github.com/cilium/cilium/pkg/hive/cell" ) +const ( + noPod = "(/)" + rootNode = "agent" + noErr = "" +) + // ModulesHealth represent hive modules health API. type ModulesHealth interface { // GetHealth retrieves agent modules health. 
@@ -32,17 +43,19 @@ func GetAndFormatModulesHealth(w io.Writer, clt ModulesHealth, verbose bool) { return } if verbose { - fmt.Fprintln(w) + r := newRoot(rootNode) + sort.Slice(resp.Payload.Modules, func(i, j int) bool { + return resp.Payload.Modules[i].ModuleID < resp.Payload.Modules[j].ModuleID + }) for _, m := range resp.Payload.Modules { - n := &cell.StatusNode{} - if err := json.Unmarshal([]byte(m.Message), n); err != nil { - panic(err) - } if m.Level == string(cell.StatusUnknown) { continue } - fmt.Fprintf(w, "%s", n.StringIndent(2)) + if err := buildTree(r, m.Message); err != nil { + fmt.Fprintf(w, "Modules Health rendering failed: %s\n", err) + } } + fmt.Fprintln(w, "\n"+r.String()) return } tally := make(map[cell.Level]int, 4) @@ -60,3 +73,51 @@ func GetAndFormatModulesHealth(w io.Writer, clt ModulesHealth, verbose bool) { tally[cell.StatusUnknown], ) } + +func buildTree(n *node, raw string) error { + var sn cell.StatusNode + if err := json.Unmarshal([]byte(raw), &sn); err != nil { + return err + } + build(n, &sn) + return nil +} + +func ensurePath(n *node, pp []string) *node { + current := n + for _, p := range pp { + if v := current.find(p); v != nil { + current = v + continue + } + current = current.addBranch(strings.Replace(p, noPod, "", 1)) + } + + return current +} + +func build(n *node, sn *cell.StatusNode) { + meta := fmt.Sprintf("[%s] %s", strings.ToUpper(string(sn.LastLevel)), sn.Message) + if sn.Error != "" { + meta += " -- " + sn.Error + } + meta += fmt.Sprintf(" (%s, x%d)", ToAgeHuman(sn.UpdateTimestamp), sn.Count) + pp := strings.Split(sn.Name, ".") + current := ensurePath(n, pp) + if len(sn.SubStatuses) == 0 { + current.meta = meta + return + } + for _, s := range sn.SubStatuses { + build(current, s) + } +} + +// ToAgeHuman converts time to duration. 
+func ToAgeHuman(t time.Time) string { + if t.IsZero() { + return "n/a" + } + + return duration.HumanDuration(time.Since(t)) +} diff --git a/vendor/github.com/cilium/cilium/pkg/health/client/tree.go b/vendor/github.com/cilium/cilium/pkg/health/client/tree.go new file mode 100644 index 0000000000..b8dccd5a51 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/health/client/tree.go @@ -0,0 +1,211 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package client + +import ( + "bytes" + "fmt" + "io" + "strconv" + "strings" +) + +const ( + indentSize = 3 + leafMaxWidth = 40 + link decoration = "│" + mid decoration = "├──" + end decoration = "└──" +) + +type decoration string + +func newRoot(r string) *node { + return &node{val: r} +} + +type node struct { + val, meta string + parent *node + nodes []*node +} + +func (n *node) addNode(v string) *node { + return n.addNodeWithMeta(v, "") +} + +func (n *node) addNodeWithMeta(v, m string) *node { + node := node{ + parent: n, + val: v, + meta: m, + } + n.nodes = append(n.nodes, &node) + + return &node +} + +func (n *node) addBranch(v string) *node { + return n.addBranchWithMeta(v, "") +} + +func (n *node) addBranchWithMeta(v, m string) *node { + b := node{ + parent: n, + meta: m, + val: v, + } + n.nodes = append(n.nodes, &b) + + return &b +} + +func (n *node) find(val string) *node { + if n.val == val { + return n + } + for _, node := range n.nodes { + if node.val == val { + return node + } + if v := node.find(val); v != nil { + return v + } + } + + return nil +} + +func (n *node) asBytes() []byte { + var ( + w = new(bytes.Buffer) + levelsEnded []int + max = computeMaxLevel(0, n) + ) + if n.parent == nil { + w.WriteString(n.val) + if n.meta != "" { + w.WriteString(" " + n.meta) + } + fmt.Fprintln(w) + } else { + edge := mid + if len(n.nodes) == 0 { + edge = end + levelsEnded = append(levelsEnded, 0) + } + dumpVals(w, 0, max, levelsEnded, edge, n) + } + if len(n.nodes) > 0 { + dumpNodes(w, 0, max, levelsEnded, n.nodes) + } + + return w.Bytes() +} + +func (n *node) String() string { + return string(n.asBytes()) +} + +func (n *node) lastNode() *node { + c := len(n.nodes) + if c == 0 { + return nil + } + + return n.nodes[c-1] +} + +func computeMaxLevel(level int, n *node) int { + if n == nil || len(n.nodes) == 0 { + return level + } + var max int + for _, n := range n.nodes { + m := computeMaxLevel(level+1, n) + if m > max { + max = m + } + } + + return max +} + +func dumpNodes(w io.Writer, level, maxLevel int, levelsEnded []int, nodes []*node) { + for i, node := range nodes { + edge := mid + if i == len(nodes)-1 { + levelsEnded = append(levelsEnded, level) + edge = end + } + dumpVals(w, level, maxLevel, levelsEnded, edge, node) + if len(node.nodes) > 0 { + dumpNodes(w, level+1, maxLevel, levelsEnded, node.nodes) + } + } +} + +func dumpVals(w io.Writer, level, maxLevel int, levelsEnded []int, edge decoration, node *node) { + for i := 0; i < level; i++ { + if isEnded(levelsEnded, i) { + fmt.Fprint(w, strings.Repeat(" ", indentSize+1)) + continue + } + fmt.Fprintf(w, "%s%s", link, strings.Repeat(" ", indentSize)) + } + + val := dumpVal(level, node) + if node.meta != "" { + c := maxLevel - level + if c < 0 { + c = 0 + } + fmt.Fprintf(w, "%s %-"+strconv.Itoa(leafMaxWidth+c*2)+"s%s%s\n", edge, val, strings.Repeat(" ", c), node.meta) + return + } + fmt.Fprintf(w, "%s %s\n", edge, val) +} + +func isEnded(levelsEnded []int, level int) bool { + for _, l := range levelsEnded { + if l == level { + return true + } + } + + return false 
+} + +func dumpVal(level int, node *node) string { + lines := strings.Split(node.val, "\n") + if len(lines) < 2 { + return node.val + } + + pad := indent(level, node) + for i := 1; i < len(lines); i++ { + lines[i] = fmt.Sprintf("%s%s", pad, lines[i]) + } + + return strings.Join(lines, "\n") +} + +func indent(level int, node *node) string { + links := make([]string, level+1) + for node.parent != nil { + if isLast(node) { + links[level] = strings.Repeat(" ", indentSize+1) + } else { + links[level] = fmt.Sprintf("%s%s", link, strings.Repeat(" ", indentSize)) + } + level-- + node = node.parent + } + + return strings.Join(links, "") +} + +func isLast(n *node) bool { + return n == n.parent.lastNode() +} diff --git a/vendor/github.com/cilium/cilium/pkg/hive/cell/config.go b/vendor/github.com/cilium/cilium/pkg/hive/cell/config.go index 860e03be38..9d416e9910 100644 --- a/vendor/github.com/cilium/cilium/pkg/hive/cell/config.go +++ b/vendor/github.com/cilium/cilium/pkg/hive/cell/config.go @@ -12,6 +12,7 @@ import ( "github.com/spf13/pflag" "go.uber.org/dig" + "github.com/cilium/cilium/pkg/cidr" "github.com/cilium/cilium/pkg/command" ) @@ -105,7 +106,8 @@ func decoderConfig(target any) *mapstructure.DecoderConfig { DecodeHook: mapstructure.ComposeDecodeHookFunc( mapstructure.StringToTimeDurationHookFunc(), mapstructure.StringToSliceHookFunc(","), - stringToMapHookFunc(), + stringToCIDRHookFunc, + stringToMapHookFunc, ), ZeroFields: true, // Error out if the config struct has fields that are @@ -143,14 +145,23 @@ func (c *config[Cfg]) Info(cont container) (info Info) { return } -// stringToMapHookFunc returns a DecodeHookFunc that converts string +// stringToMapHookFunc is a DecodeHookFunc that converts string // to map[string]string supporting both json and KV formats. -func stringToMapHookFunc() mapstructure.DecodeHookFunc { - return func(from reflect.Kind, to reflect.Kind, data interface{}) (interface{}, error) { - if from != reflect.String || to != reflect.Map { - return data, nil - } +func stringToMapHookFunc(from reflect.Kind, to reflect.Kind, data interface{}) (interface{}, error) { + if from != reflect.String || to != reflect.Map { + return data, nil + } + return command.ToStringMapStringE(data.(string)) +} - return command.ToStringMapStringE(data.(string)) +// stringToCIDRSliceHookFunc is a DecodeHookFunc that converts string to []*cidr.CIDR. 
+func stringToCIDRHookFunc(from reflect.Type, to reflect.Type, data interface{}) (interface{}, error) { + if from.Kind() != reflect.String { + return data, nil + } + s := data.(string) + if to != reflect.TypeOf((*cidr.CIDR)(nil)) { + return data, nil } + return cidr.ParseCIDR(s) } diff --git a/vendor/github.com/cilium/cilium/pkg/hive/cell/structured.go b/vendor/github.com/cilium/cilium/pkg/hive/cell/structured.go index d137824ed3..52c79b16a0 100644 --- a/vendor/github.com/cilium/cilium/pkg/hive/cell/structured.go +++ b/vendor/github.com/cilium/cilium/pkg/hive/cell/structured.go @@ -416,6 +416,7 @@ type StatusNode struct { UpdateTimestamp time.Time `json:"timestamp"` Count int `json:"count"` SubStatuses []*StatusNode `json:"sub_statuses,omitempty"` + Error string `json:"error,omitempty"` } var _ Update = (*StatusNode)(nil) @@ -476,6 +477,9 @@ func (s *subreporterBase) getStatusTreeLocked(nid string) *StatusNode { UpdateTimestamp: rn.Timestamp, Count: rn.count, } + if err := rn.Error; err != nil { + n.Error = err.Error() + } allok := true childIDs := maps.Keys(children) sort.Strings(childIDs) diff --git a/vendor/github.com/cilium/cilium/pkg/identity/numericidentity.go b/vendor/github.com/cilium/cilium/pkg/identity/numericidentity.go index 889dbdf97d..03d39b743a 100644 --- a/vendor/github.com/cilium/cilium/pkg/identity/numericidentity.go +++ b/vendor/github.com/cilium/cilium/pkg/identity/numericidentity.go @@ -10,6 +10,7 @@ import ( "net/netip" "sort" "strconv" + "sync" "unsafe" cmtypes "github.com/cilium/cilium/pkg/clustermesh/types" @@ -20,10 +21,6 @@ import ( ) const ( - // ClusterIDShift specifies the number of bits the cluster ID will be - // shifted - ClusterIDShift = 16 - // Identities also have scopes, which is defined by the high 8 bits. // 0x00 -- Global and reserved identities. Reserved identities are // not allocated like global identities, but are known @@ -87,13 +84,13 @@ const ( ) var ( - // MinimalAllocationIdentity is the minimum numeric identity handed out - // by the identity allocator. - MinimalAllocationIdentity = MinimalNumericIdentity + // clusterIDInit ensures that clusterIDLen and clusterIDShift can only be + // set once, and only if we haven't used either value elsewhere already. + clusterIDInit sync.Once - // MaximumAllocationIdentity is the maximum numeric identity handed out - // by the identity allocator - MaximumAllocationIdentity = NumericIdentity((1< 0 { +// GetClusterIDShift returns the number of bits to shift a cluster ID in a numeric +// identity and is equal to the number of bits that represent a cluster-local identity. +// A sync.Once is used to ensure we only initialize clusterIDShift once. +func GetClusterIDShift() uint32 { + clusterIDInit.Do(initClusterIDShift) + return clusterIDShift +} + +// initClusterIDShift sets variables that control the bit allocation of cluster +// ID in a numeric identity. +func initClusterIDShift() { + // ClusterIDLen is the number of bits that represent a cluster ID in a numeric identity + clusterIDLen := uint32(math.Log2(float64(cmtypes.ClusterIDMax + 1))) + // ClusterIDShift is the number of bits to shift a cluster ID in a numeric identity + clusterIDShift = NumericIdentityBitlength - clusterIDLen +} + +// GetMinimalNumericIdentity returns the minimal numeric identity not used for +// reserved purposes. 
+func GetMinimalAllocationIdentity() NumericIdentity { + if option.Config.ClusterID > 0 { // For ClusterID > 0, the identity range just starts from cluster shift, // no well-known-identities need to be reserved from the range. - MinimalAllocationIdentity = NumericIdentity((1 << ClusterIDShift) * cinfo.ID) - // The maximum identity also needs to be recalculated as ClusterID - // may be overwritten by runtime parameters. - MaximumAllocationIdentity = NumericIdentity((1<> 16) & 0xFF + return (uint32(id) >> uint32(GetClusterIDShift())) & cmtypes.ClusterIDMax } // GetAllReservedIdentities returns a list of all reserved numeric identities diff --git a/vendor/github.com/cilium/cilium/pkg/ipam/option/option.go b/vendor/github.com/cilium/cilium/pkg/ipam/option/option.go index 6f30091723..90f2b9bb7c 100644 --- a/vendor/github.com/cilium/cilium/pkg/ipam/option/option.go +++ b/vendor/github.com/cilium/cilium/pkg/ipam/option/option.go @@ -46,6 +46,3 @@ const ( // prefixes. Every /28 prefix contains 16 IP addresses. // See https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-prefix-eni.html#ec2-prefix-basics for more details const ENIPDBlockSizeIPv4 = 16 - -// PoolDefault is the default IP pool from which to allocate. -const PoolDefault = "default" diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/register.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/register.go index 23bdd7ce70..fa5a936ea4 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/register.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/register.go @@ -15,5 +15,5 @@ const ( // // Maintainers: Run ./Documentation/check-crd-compat-table.sh for each release // Developers: Bump patch for each change in the CRD schema. - CustomResourceDefinitionSchemaVersion = "1.26.11" + CustomResourceDefinitionSchemaVersion = "1.26.12" ) diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/types.go index af821e8dfb..1353fda6dc 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/types.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/types.go @@ -4,6 +4,7 @@ package v2 import ( + "net" "sort" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -20,11 +21,10 @@ import ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:openapi-gen=false // +kubebuilder:resource:categories={cilium},singular="ciliumendpoint",path="ciliumendpoints",scope="Namespaced",shortName={cep,ciliumep} -// +kubebuilder:printcolumn:JSONPath=".status.id",description="Cilium endpoint id",name="Endpoint ID",type=integer -// +kubebuilder:printcolumn:JSONPath=".status.identity.id",description="Cilium identity id",name="Identity ID",type=integer -// +kubebuilder:printcolumn:JSONPath=".status.policy.ingress.state",description="Ingress enforcement in the endpoint",name="Ingress Enforcement",type=string -// +kubebuilder:printcolumn:JSONPath=".status.policy.egress.state",description="Egress enforcement in the endpoint",name="Egress Enforcement",type=string -// +kubebuilder:printcolumn:JSONPath=".status.visibility-policy-status",description="Status of visibility policy in the endpoint",name="Visibility Policy",type=string +// +kubebuilder:printcolumn:JSONPath=".status.identity.id",description="Security Identity",name="Security Identity",type=integer +// +kubebuilder:printcolumn:JSONPath=".status.policy.ingress.state",description="Ingress enforcement in the endpoint",name="Ingress 
Enforcement",type=string,priority=1 +// +kubebuilder:printcolumn:JSONPath=".status.policy.egress.state",description="Egress enforcement in the endpoint",name="Egress Enforcement",type=string,priority=1 +// +kubebuilder:printcolumn:JSONPath=".status.visibility-policy-status",description="Status of visibility policy in the endpoint",name="Visibility Policy",type=string,priority=1 // +kubebuilder:printcolumn:JSONPath=".status.state",description="Endpoint current state",name="Endpoint State",type=string // +kubebuilder:printcolumn:JSONPath=".status.networking.addressing[0].ipv4",description="Endpoint IPv4 address",name="IPv4",type=string // +kubebuilder:printcolumn:JSONPath=".status.networking.addressing[0].ipv6",description="Endpoint IPv6 address",name="IPv6",type=string @@ -465,3 +465,22 @@ func (n *CiliumNode) InstanceID() (instanceID string) { } return } + +func (n NodeAddress) ToString() string { + return n.IP +} + +func (n NodeAddress) AddrType() addressing.AddressType { + return n.Type +} + +// GetIP returns one of the CiliumNode's IP addresses available with the +// following priority: +// - NodeInternalIP +// - NodeExternalIP +// - other IP address type +// An error is returned if GetIP fails to extract an IP from the CiliumNode +// based on the provided address family. +func (n *CiliumNode) GetIP(ipv6 bool) net.IP { + return addressing.ExtractNodeIP[NodeAddress](n.Spec.Addresses, ipv6) +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgpp_types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgpp_types.go index 00962166a7..3558015228 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgpp_types.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgpp_types.go @@ -34,6 +34,8 @@ const ( PodCIDRSelectorName = "PodCIDR" // CiliumLoadBalancerIPPoolSelectorName defines the name for a selector matching CiliumLoadBalancerIPPool resources. CiliumLoadBalancerIPPoolSelectorName = "CiliumLoadBalancerIPPool" + // CiliumPodIPPoolSelectorName defines the name for a selector matching CiliumPodIPPool resources. + CiliumPodIPPoolSelectorName = CPIPKindDefinition ) // +genclient @@ -138,8 +140,10 @@ type CiliumBGPPathAttributes struct { // Only affects routes of cluster scope / Kubernetes IPAM CIDRs, not Multi-Pool IPAM CIDRs. // - For "CiliumLoadBalancerIPPool" the Selector matches CiliumLoadBalancerIPPool custom resources // (path attributes apply to routes announced for selected CiliumLoadBalancerIPPools). + // - For "CiliumPodIPPool" the Selector matches CiliumPodIPPool custom resources + // (path attributes apply to routes announced for allocated CIDRs of selected CiliumPodIPPools). 
// - // +kubebuilder:validation:Enum=PodCIDR;CiliumLoadBalancerIPPool + // +kubebuilder:validation:Enum=PodCIDR;CiliumLoadBalancerIPPool;CiliumPodIPPool // +kubebuilder:validation:Required SelectorType string `json:"selectorType"` diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/generated.pb.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/generated.pb.go index 63de1bc6a4..f76b229b0b 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/generated.pb.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/generated.pb.go @@ -1175,172 +1175,174 @@ func init() { } var fileDescriptor_871504499faea14d = []byte{ - // 2636 bytes of a gzipped FileDescriptorProto + // 2659 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x5a, 0xcf, 0x6f, 0x23, 0x49, 0xf5, 0x9f, 0xb6, 0xe3, 0xc4, 0x7e, 0x4e, 0x9c, 0x4d, 0xcd, 0x66, 0xd5, 0x93, 0xef, 0x6c, 0x92, - 0x6f, 0x0f, 0x42, 0xc3, 0x2f, 0x5b, 0x33, 0x10, 0x98, 0x9d, 0xd9, 0x19, 0x26, 0x76, 0x32, 0x3b, - 0xde, 0x4d, 0x66, 0x9b, 0x72, 0xb4, 0x20, 0x38, 0xb0, 0x9d, 0xee, 0x8a, 0xdd, 0x1b, 0xbb, 0xdb, - 0x74, 0x95, 0x33, 0x6b, 0x09, 0xc1, 0x22, 0x04, 0x5a, 0x60, 0x24, 0xf8, 0x07, 0xb8, 0x70, 0xe3, - 0x0c, 0x17, 0xc4, 0x19, 0x31, 0xdc, 0x96, 0xdb, 0x08, 0xa1, 0xb0, 0x13, 0x24, 0x6e, 0x1c, 0x10, - 0x27, 0x82, 0x84, 0x50, 0x55, 0x57, 0x57, 0x77, 0x3b, 0xf6, 0x4c, 0xc6, 0x8e, 0x14, 0x71, 0x72, - 0xf7, 0x7b, 0xaf, 0xde, 0xa7, 0x5e, 0xd5, 0xab, 0xf7, 0xa3, 0xdc, 0xb0, 0xd9, 0x74, 0x59, 0xab, - 0xb7, 0x5b, 0xb6, 0xfd, 0x4e, 0xc5, 0x76, 0xdb, 0x6e, 0x4f, 0xfd, 0x74, 0xf7, 0x9b, 0x95, 0xfd, - 0x1b, 0xb4, 0x42, 0xdb, 0x6e, 0x47, 0x3c, 0x58, 0x5d, 0xb7, 0x62, 0xfb, 0x01, 0xa9, 0x1c, 0x5c, - 0xab, 0x34, 0x89, 0x47, 0x02, 0x8b, 0x11, 0xa7, 0xdc, 0x0d, 0x7c, 0xe6, 0xa3, 0xb5, 0x58, 0x4d, - 0x39, 0x1c, 0x1f, 0xfd, 0x74, 0xf7, 0x9b, 0xe5, 0xfd, 0x1b, 0xb4, 0xcc, 0xd5, 0x88, 0x07, 0xab, - 0xeb, 0x96, 0xb9, 0x9a, 0xf2, 0xc1, 0xb5, 0xa5, 0x7b, 0x2f, 0x84, 0x4e, 0x2b, 0x1d, 0xc2, 0xac, - 0x21, 0xf0, 0x4b, 0x9f, 0x4b, 0xe8, 0x69, 0xfa, 0x4d, 0xbf, 0x22, 0xc8, 0xbb, 0xbd, 0x3d, 0xf1, - 0x26, 0x5e, 0xc4, 0x93, 0x14, 0xe7, 0x0a, 0xcb, 0xae, 0xcf, 0x75, 0x76, 0x2c, 0xbb, 0xe5, 0x7a, - 0x24, 0xe8, 0x0b, 0xc4, 0xa0, 0xe7, 0x31, 0xb7, 0x43, 0x4e, 0xe8, 0xff, 0xe2, 0xf3, 0x06, 0x50, - 0xbb, 0x45, 0x3a, 0xd6, 0xe0, 0x38, 0x63, 0x0b, 0x4a, 0xb5, 0xb6, 0x4b, 0x3c, 0x56, 0x37, 0x6b, - 0xbe, 0xb7, 0xe7, 0x36, 0xd1, 0x4d, 0x28, 0xf1, 0x01, 0x7e, 0x8f, 0x35, 0x88, 0xed, 0x7b, 0x0e, - 0xd5, 0xb5, 0x55, 0xed, 0x6a, 0xae, 0x8a, 0x8e, 0x0e, 0x57, 0x4a, 0x3b, 0x29, 0x0e, 0x1e, 0x90, - 0x34, 0x7e, 0x9b, 0x81, 0x42, 0xcd, 0xf7, 0x98, 0xc5, 0xf1, 0xd1, 0x2a, 0x4c, 0x79, 0x56, 0x87, - 0x88, 0xf1, 0x85, 0xea, 0xec, 0xe3, 0xc3, 0x95, 0x0b, 0x47, 0x87, 0x2b, 0x53, 0x0f, 0xac, 0x0e, - 0xc1, 0x82, 0x83, 0xae, 0x40, 0xce, 0xed, 0x58, 0x4d, 0xa2, 0x67, 0x84, 0xc8, 0x9c, 0x14, 0xc9, - 0xd5, 0x39, 0x11, 0x87, 0x3c, 0xe4, 0x42, 0xae, 0xeb, 0x07, 0x8c, 0xea, 0xd3, 0xab, 0xd9, 0xab, - 0xc5, 0xeb, 0x1b, 0xe5, 0xb1, 0x76, 0xb2, 0xac, 0xe6, 0x65, 0xfa, 0x01, 0x8b, 0xa1, 0xf8, 0x1b, - 0xc5, 0x21, 0x02, 0xfa, 0x36, 0xcc, 0x1e, 0xf8, 0xed, 0x5e, 0x87, 0x6c, 0xfb, 0x3d, 0x8f, 0x51, - 0xbd, 0x20, 0x10, 0xab, 0x63, 0x22, 0xbe, 0x13, 0xab, 0xaa, 0xbe, 0x2c, 0xf1, 0x66, 0x13, 0x44, - 0x8a, 0x53, 0x68, 0xc6, 0x7f, 0x34, 0x98, 0x4b, 0xcd, 0xf2, 0x14, 0x2b, 0xf8, 0x59, 0xc8, 0xb7, - 0x7c, 0xca, 0xb8, 0xb4, 0x58, 0xc4, 0x5c, 0xf5, 0x25, 0x29, 0x95, 0xbf, 0x2f, 0xe9, 0x58, 0x49, - 0xa0, 0x5b, 0x30, 0x67, 
0x27, 0x01, 0xf4, 0xac, 0x18, 0xb2, 0x28, 0x87, 0xa4, 0xd1, 0x71, 0x5a, - 0x16, 0xdd, 0x80, 0xbc, 0xf0, 0x19, 0xdb, 0x6f, 0xeb, 0x53, 0x62, 0x42, 0x97, 0x23, 0x28, 0x53, - 0xd2, 0x8f, 0x13, 0xcf, 0x58, 0x49, 0xa3, 0x4f, 0xc2, 0x34, 0x9f, 0x42, 0xdd, 0xd4, 0x73, 0x62, - 0x5c, 0x49, 0x8e, 0x9b, 0xbe, 0x2f, 0xa8, 0x58, 0x72, 0x8d, 0x1f, 0x68, 0x50, 0x52, 0x53, 0x68, - 0x30, 0x8b, 0x11, 0x44, 0x61, 0x26, 0xe8, 0x79, 0x9e, 0xeb, 0x35, 0x85, 0x79, 0xc5, 0xeb, 0x5b, - 0x93, 0x6e, 0xbf, 0xd0, 0x8b, 0x43, 0x9d, 0xd5, 0xe2, 0xd1, 0xe1, 0xca, 0x8c, 0x7c, 0xc1, 0x11, - 0x92, 0xf1, 0x43, 0x0d, 0x16, 0x87, 0xca, 0xa3, 0x0e, 0x14, 0x28, 0xb3, 0x02, 0x46, 0x9c, 0x75, - 0x26, 0x76, 0xa5, 0x78, 0xfd, 0xf5, 0x17, 0x9b, 0x10, 0x2d, 0xf3, 0x10, 0xc1, 0x67, 0xc4, 0xcf, - 0x52, 0x75, 0x41, 0x2e, 0x45, 0xa1, 0x11, 0xa9, 0xc5, 0x31, 0x82, 0xf1, 0x6b, 0x0d, 0xe6, 0x53, - 0x13, 0xe9, 0x51, 0xf4, 0x1e, 0xe4, 0x28, 0x9f, 0x92, 0x5c, 0x8f, 0xcd, 0x33, 0x59, 0x8f, 0xf8, - 0x3c, 0x84, 0xe6, 0x86, 0x10, 0x68, 0x0d, 0x8a, 0xca, 0x07, 0xea, 0x1b, 0x7a, 0x5e, 0xec, 0xde, - 0x45, 0x29, 0x5a, 0xac, 0xc5, 0x2c, 0x9c, 0x94, 0x33, 0xbe, 0x0a, 0xf3, 0x9b, 0x9e, 0xd3, 0xf5, - 0x5d, 0x8f, 0xad, 0x3b, 0x4e, 0x40, 0x28, 0x45, 0x4b, 0x90, 0x71, 0xbb, 0xd2, 0x8f, 0x41, 0x2a, - 0xc8, 0xd4, 0x4d, 0x9c, 0x71, 0xbb, 0xe8, 0x2a, 0xe4, 0x3d, 0xdf, 0x21, 0xdc, 0xab, 0xa5, 0x63, - 0xcd, 0x72, 0xa7, 0x7a, 0x20, 0x69, 0x58, 0x71, 0x8d, 0x47, 0x1a, 0xcc, 0x46, 0x9a, 0x4f, 0x79, - 0x40, 0x56, 0x61, 0xaa, 0x1b, 0x1f, 0x0e, 0x25, 0x21, 0x1c, 0x5c, 0x70, 0x52, 0x7e, 0x9d, 0x7d, - 0x11, 0xbf, 0x36, 0xfe, 0xa9, 0x41, 0x29, 0x9a, 0x4e, 0xa3, 0xb7, 0x4b, 0x09, 0x43, 0x0f, 0xa1, - 0x60, 0x85, 0x26, 0x13, 0x1e, 0x38, 0x79, 0xf8, 0xb8, 0x37, 0xe6, 0x0e, 0x0d, 0x2c, 0x61, 0xec, - 0x2a, 0xeb, 0x11, 0x00, 0x8e, 0xb1, 0x50, 0x2b, 0x8a, 0x92, 0x59, 0x01, 0x5a, 0x9b, 0x10, 0x74, - 0x74, 0x90, 0x34, 0xfe, 0xa1, 0x41, 0x21, 0x12, 0xa3, 0x28, 0x80, 0x3c, 0x77, 0x68, 0xc7, 0x62, - 0x96, 0x3c, 0x10, 0xd5, 0x71, 0x0f, 0xc4, 0xdb, 0xbb, 0xef, 0x11, 0x9b, 0x6d, 0x13, 0x66, 0x55, - 0x91, 0x44, 0x86, 0x98, 0x86, 0x15, 0x0e, 0xea, 0xc2, 0x0c, 0x15, 0xcb, 0x4d, 0xf5, 0x8c, 0xb0, - 0x76, 0x73, 0x42, 0x6b, 0xc3, 0xcd, 0xab, 0xce, 0x4b, 0xd4, 0x99, 0xf0, 0x9d, 0xe2, 0x08, 0xc6, - 0xf8, 0x9b, 0x06, 0x73, 0xca, 0xe6, 0x2d, 0x97, 0x32, 0xe4, 0x9d, 0xb0, 0xfb, 0xee, 0xb8, 0x76, - 0x73, 0x7d, 0xc2, 0x6a, 0x15, 0xba, 0x23, 0x4a, 0xc2, 0x66, 0x02, 0x39, 0x97, 0x91, 0x4e, 0x64, - 0xf1, 0xdd, 0x09, 0x2d, 0xa6, 0x89, 0x64, 0xcb, 0xd5, 0xe2, 0x50, 0xbb, 0xf1, 0x7b, 0x0d, 0x2e, - 0x6e, 0xf9, 0x96, 0x53, 0xb5, 0xda, 0x96, 0x67, 0x93, 0xa0, 0xee, 0x35, 0x9f, 0x7b, 0x7e, 0x65, - 0x0e, 0x12, 0x07, 0x31, 0x4c, 0xe4, 0xa9, 0x1c, 0xe4, 0x89, 0x33, 0x1c, 0x49, 0xa0, 0xbd, 0xc8, - 0x51, 0xa7, 0x84, 0x21, 0xeb, 0x63, 0x1a, 0xc2, 0x5d, 0x32, 0x8c, 0x88, 0x23, 0xdc, 0xf4, 0x27, - 0x1a, 0xa0, 0xa4, 0x25, 0x32, 0x7c, 0xf6, 0x60, 0xc6, 0x0d, 0x6d, 0x92, 0xc7, 0xf3, 0xcd, 0x31, - 0x27, 0x30, 0x64, 0x95, 0x62, 0x07, 0x92, 0x04, 0x1c, 0x61, 0x19, 0xdf, 0x85, 0x02, 0x0f, 0x4a, - 0xb4, 0x6b, 0xd9, 0xe4, 0x3c, 0xce, 0x8c, 0xf0, 0x60, 0x35, 0x83, 0xff, 0x65, 0x0f, 0x56, 0x46, - 0x8c, 0xf0, 0xe0, 0xc7, 0x19, 0x98, 0xe2, 0xa9, 0xe3, 0x5c, 0x22, 0x93, 0x05, 0x53, 0xb4, 0x4b, - 0x6c, 0x99, 0x9b, 0xbf, 0x3c, 0xae, 0x89, 0xbe, 0x43, 0x1a, 0x5d, 0x62, 0xc7, 0xe9, 0x8a, 0xbf, - 0x61, 0xa1, 0x1a, 0xb9, 0x30, 0x4d, 0x85, 0x2b, 0x8b, 0x64, 0x35, 0xfe, 0x01, 0x12, 0x20, 0xe1, - 0x01, 0x52, 0xf5, 0x58, 0xf8, 0x8e, 0x25, 0x80, 0xd1, 0x81, 0x22, 0x97, 0x8a, 0x72, 0xf8, 0xe7, - 0x61, 0x8a, 0xf5, 0xbb, 0x51, 0xb2, 0x5d, 0x89, 
0xe6, 0xb6, 0xd3, 0xef, 0x92, 0xe3, 0xc3, 0x95, - 0xf9, 0x84, 0x28, 0x27, 0x61, 0x21, 0x8c, 0x3e, 0x05, 0x33, 0x32, 0x49, 0xc9, 0xd8, 0xa0, 0xce, - 0x88, 0x94, 0xc5, 0x11, 0xdf, 0xf8, 0x25, 0x77, 0x51, 0xdf, 0x21, 0x35, 0xdf, 0x73, 0x5c, 0xe6, - 0xfa, 0x1e, 0x5a, 0x4b, 0x21, 0xfe, 0xff, 0x00, 0xe2, 0x42, 0x4a, 0x38, 0x81, 0xf9, 0x9a, 0x5a, - 0xa2, 0x4c, 0x6a, 0xa0, 0xb4, 0x8f, 0x4f, 0x56, 0x0d, 0x4b, 0x9b, 0xcc, 0x4b, 0xd5, 0x80, 0x58, - 0xd4, 0xf7, 0x06, 0x4b, 0x55, 0x2c, 0xa8, 0x58, 0x72, 0x8d, 0x3f, 0x6b, 0x20, 0x0a, 0x94, 0x73, - 0x39, 0x49, 0xef, 0xa6, 0x4f, 0xd2, 0xad, 0x09, 0x3c, 0x60, 0xc4, 0x21, 0xfa, 0x97, 0x34, 0x8f, - 0xfb, 0x1d, 0xdf, 0xc2, 0xae, 0xef, 0xd4, 0xea, 0x1b, 0x58, 0x6e, 0x84, 0xda, 0x42, 0x33, 0x24, - 0xe3, 0x88, 0xcf, 0x4b, 0x39, 0xf9, 0x48, 0xf5, 0x99, 0xd5, 0x6c, 0x54, 0xca, 0x49, 0x39, 0x8a, - 0x15, 0x17, 0x5d, 0x07, 0xe8, 0x06, 0xfe, 0x81, 0xeb, 0x88, 0xca, 0x32, 0xac, 0xbb, 0xd4, 0xd9, - 0x32, 0x15, 0x07, 0x27, 0xa4, 0x90, 0x03, 0xd3, 0xbc, 0xc6, 0x64, 0x54, 0xcf, 0x09, 0xc3, 0x5f, - 0x1f, 0xd3, 0xf0, 0x1d, 0xae, 0x24, 0xde, 0x5a, 0xf1, 0x4a, 0xb1, 0xd4, 0x6d, 0xfc, 0x5b, 0x03, - 0x88, 0x0f, 0x07, 0x7a, 0x1f, 0xc0, 0x8e, 0x9c, 0x25, 0x4a, 0x5a, 0x1b, 0x13, 0xac, 0xb8, 0xf2, - 0xbc, 0xd8, 0x5c, 0x45, 0xa2, 0x38, 0x81, 0x85, 0x68, 0xb2, 0x96, 0xcc, 0x4d, 0xd4, 0x8a, 0x26, - 0xce, 0xe6, 0xb3, 0xeb, 0x48, 0xe3, 0x77, 0x19, 0xc8, 0x9a, 0xbe, 0x73, 0x2e, 0xd1, 0xf3, 0xdd, - 0x54, 0xf4, 0xbc, 0x33, 0x76, 0x65, 0xe0, 0x8c, 0x0c, 0x9e, 0xad, 0x81, 0xe0, 0x79, 0x77, 0x02, - 0x8c, 0x67, 0xc7, 0xce, 0x27, 0x59, 0x98, 0xe5, 0x6e, 0xaf, 0x62, 0xd9, 0x17, 0x52, 0xb1, 0x6c, - 0x75, 0x20, 0x96, 0xbd, 0x94, 0x94, 0x3d, 0x9b, 0x50, 0xd6, 0x87, 0xb9, 0xb6, 0x45, 0x99, 0x19, - 0xf8, 0xbb, 0x84, 0xf7, 0x9a, 0xd2, 0xe4, 0xc9, 0xfa, 0x55, 0x75, 0x55, 0xb0, 0x95, 0x54, 0x8d, - 0xd3, 0x48, 0xe8, 0x43, 0x0d, 0x10, 0xa7, 0xec, 0x04, 0x96, 0x47, 0x43, 0x93, 0x5c, 0xd9, 0xdc, - 0x4d, 0x3a, 0x81, 0x25, 0x39, 0x01, 0xb4, 0x75, 0x42, 0x3f, 0x1e, 0x82, 0x79, 0xda, 0x80, 0xce, - 0x83, 0x5c, 0x87, 0x50, 0x6a, 0x35, 0x89, 0x3e, 0x9d, 0x0e, 0x72, 0xdb, 0x21, 0x19, 0x47, 0x7c, - 0xe3, 0x0a, 0xe4, 0x4c, 0xdf, 0xa9, 0x9b, 0xcf, 0x2a, 0x8a, 0x8d, 0x3f, 0x69, 0xc0, 0xc3, 0xe3, - 0xb9, 0xe4, 0x87, 0x6f, 0xa6, 0xf3, 0xc3, 0xcd, 0xf1, 0x9d, 0x7c, 0x44, 0x7a, 0xf8, 0x55, 0x56, - 0x18, 0x27, 0xb2, 0xc3, 0x07, 0x1a, 0x94, 0x5c, 0xcf, 0x65, 0xea, 0x36, 0x80, 0xea, 0x2f, 0x4f, - 0x54, 0xe0, 0x29, 0x45, 0xd5, 0x57, 0x24, 0x78, 0xa9, 0x9e, 0xd2, 0x8f, 0x07, 0xf0, 0x10, 0x13, - 0x21, 0x3a, 0x42, 0xcf, 0x9c, 0x11, 0x7a, 0x32, 0x3c, 0x47, 0xc8, 0x09, 0x1c, 0xf4, 0x26, 0x20, - 0x4a, 0x82, 0x03, 0xd7, 0x26, 0xeb, 0xb6, 0xed, 0xf7, 0x3c, 0x26, 0x2e, 0x30, 0xc2, 0x3b, 0x12, - 0xe5, 0xa5, 0x8d, 0x13, 0x12, 0x78, 0xc8, 0x28, 0xde, 0x42, 0xa9, 0x2b, 0x10, 0x48, 0xb7, 0x50, - 0x27, 0xaf, 0x41, 0xd0, 0x1a, 0x14, 0x79, 0x3b, 0xf5, 0x80, 0xb0, 0x87, 0x7e, 0xb0, 0xaf, 0x17, - 0x57, 0xb5, 0xab, 0xf9, 0xf8, 0x5a, 0xe6, 0x7e, 0xcc, 0xc2, 0x49, 0x39, 0xe3, 0x17, 0x39, 0x28, - 0xa8, 0xc0, 0x85, 0x2a, 0x90, 0xeb, 0xb6, 0x2c, 0x1a, 0x05, 0xa4, 0x4b, 0xaa, 0x89, 0xe2, 0xc4, - 0xe3, 0x30, 0x69, 0x8b, 0x67, 0x1c, 0xca, 0xa1, 0x87, 0xa9, 0x44, 0x98, 0x99, 0xe8, 0x9a, 0x21, - 0x19, 0xed, 0x9e, 0x9b, 0x07, 0x4f, 0x79, 0x7d, 0x88, 0xae, 0xf0, 0xce, 0xd2, 0xa9, 0x9b, 0xf2, - 0x00, 0x27, 0xda, 0x42, 0xa7, 0x6e, 0xe2, 0x90, 0xc7, 0x6b, 0x08, 0xf1, 0x40, 0xf5, 0xd9, 0x89, - 0x6a, 0x08, 0xa1, 0x34, 0x9e, 0x8a, 0x78, 0xa5, 0x58, 0xea, 0x46, 0xae, 0xbc, 0x27, 0x14, 0x61, - 0x6f, 0xe6, 0x0c, 0xc2, 0xde, 0x9c, 0xba, 0x23, 0x14, 0x91, 0x2e, 0xd6, 
0x8e, 0x7e, 0xaa, 0xc1, - 0x82, 0x9d, 0xbe, 0x23, 0x24, 0x54, 0xcf, 0x4f, 0x74, 0xf5, 0x34, 0x70, 0xe7, 0xa8, 0x9c, 0x63, - 0xa1, 0x36, 0x08, 0x84, 0x4f, 0x62, 0xa3, 0x5b, 0x90, 0xff, 0x96, 0x4f, 0x6b, 0x6d, 0x8b, 0x52, - 0xbd, 0x90, 0xea, 0x15, 0xf2, 0x5f, 0x79, 0xbb, 0x21, 0xe8, 0xc7, 0x87, 0x2b, 0x45, 0xd3, 0x77, - 0xa2, 0x57, 0xac, 0x06, 0x18, 0x3f, 0xd2, 0x00, 0xe2, 0xde, 0x5e, 0x5d, 0xdf, 0x69, 0xa7, 0xba, - 0xbe, 0xcb, 0xbc, 0xd0, 0xb5, 0xf4, 0x0a, 0xe4, 0x48, 0x10, 0xf8, 0x81, 0xac, 0x3e, 0x0b, 0xdc, - 0x57, 0x36, 0x39, 0x01, 0x87, 0x74, 0xe3, 0x0f, 0x53, 0x30, 0xdd, 0x20, 0x76, 0x40, 0xd8, 0xb9, - 0x94, 0x43, 0x9f, 0x81, 0x82, 0xdb, 0xe9, 0xf4, 0x98, 0xb5, 0xdb, 0x26, 0xc2, 0xf5, 0xf3, 0xa1, - 0x1b, 0xd4, 0x23, 0x22, 0x8e, 0xf9, 0x28, 0x80, 0x29, 0x31, 0xb9, 0xf0, 0x5c, 0xbe, 0x31, 0xe6, - 0xc6, 0x87, 0xd6, 0x96, 0x37, 0x2c, 0x66, 0x6d, 0x7a, 0x2c, 0xe8, 0xab, 0x7c, 0x3f, 0xc5, 0x49, - 0x3f, 0xfe, 0xcb, 0x4a, 0xae, 0xda, 0x67, 0x84, 0x62, 0x81, 0x85, 0xbe, 0xa7, 0x01, 0x50, 0x16, - 0xb8, 0x5e, 0x93, 0x73, 0x65, 0x6d, 0xbc, 0x3d, 0x19, 0x74, 0x43, 0xe9, 0x0b, 0x27, 0xa0, 0x96, - 0x28, 0x66, 0xe0, 0x04, 0x28, 0x2a, 0xcb, 0xb2, 0x2a, 0x9b, 0x8a, 0xbb, 0x51, 0x59, 0x05, 0xa1, - 0xd6, 0xb8, 0xa0, 0x5a, 0xfa, 0x12, 0x14, 0x94, 0x72, 0xf4, 0x12, 0x64, 0xf7, 0x49, 0x3f, 0x8c, - 0x80, 0x98, 0x3f, 0xa2, 0x97, 0x21, 0x77, 0x60, 0xb5, 0x7b, 0xe1, 0x45, 0xd6, 0x2c, 0x0e, 0x5f, - 0x6e, 0x66, 0x6e, 0x68, 0x4b, 0xb7, 0x61, 0x7e, 0x60, 0x6e, 0xcf, 0x1b, 0x5e, 0x48, 0x0c, 0x37, - 0x3e, 0xd6, 0x40, 0x4e, 0xe6, 0x5c, 0x4a, 0x82, 0xdd, 0x74, 0x49, 0x70, 0x7b, 0xa2, 0x4d, 0x1a, - 0x51, 0x15, 0xfc, 0x31, 0x03, 0x33, 0x32, 0xdf, 0x9d, 0xcb, 0x79, 0x71, 0x52, 0xed, 0x43, 0x75, - 0x6c, 0x13, 0x85, 0x05, 0x23, 0x5b, 0x88, 0xf6, 0x40, 0x0b, 0xb1, 0x31, 0x21, 0xce, 0xb3, 0xdb, - 0x88, 0x23, 0x0d, 0x8a, 0x52, 0xf2, 0x5c, 0xfc, 0xc6, 0x4e, 0xfb, 0xcd, 0x9d, 0xc9, 0x8c, 0x1d, - 0xe1, 0x38, 0xbf, 0x89, 0x8d, 0x3c, 0xe5, 0xbf, 0x3a, 0xe3, 0x07, 0xfd, 0x28, 0xa1, 0x64, 0x47, - 0x26, 0x14, 0x59, 0x8b, 0x89, 0xff, 0x47, 0x73, 0xe9, 0xbf, 0x54, 0x1f, 0x48, 0x3a, 0x56, 0x12, - 0xc6, 0xa3, 0xa2, 0x9a, 0xbb, 0x28, 0x87, 0x9b, 0xd1, 0xf5, 0xb6, 0x36, 0x51, 0xc3, 0x9e, 0x58, - 0x8e, 0x11, 0xff, 0x55, 0x7f, 0x07, 0xf2, 0x94, 0xb4, 0x89, 0xcd, 0xfc, 0x40, 0x6e, 0x8e, 0x39, - 0xb9, 0xc7, 0x97, 0x1b, 0x52, 0x65, 0x18, 0x7c, 0x95, 0xe1, 0x11, 0x19, 0x2b, 0x4c, 0x54, 0x81, - 0x82, 0xdd, 0xee, 0x51, 0x46, 0x82, 0xba, 0x29, 0xa3, 0xaf, 0xba, 0x59, 0xa8, 0x45, 0x0c, 0x1c, - 0xcb, 0xa0, 0x32, 0x80, 0x7a, 0xa1, 0x3a, 0x12, 0xb7, 0x43, 0x25, 0x51, 0xf6, 0x29, 0x2a, 0x4e, - 0x48, 0xa0, 0x8a, 0x8c, 0xec, 0xe1, 0x5f, 0x82, 0xff, 0x37, 0x10, 0xd9, 0xa3, 0x45, 0x4f, 0xf4, - 0xca, 0xd7, 0xa0, 0x48, 0xde, 0x67, 0x24, 0xf0, 0xac, 0x36, 0x47, 0xc8, 0x09, 0x84, 0x79, 0x5e, - 0x12, 0x6f, 0xc6, 0x64, 0x9c, 0x94, 0x41, 0x3b, 0x30, 0x4f, 0x09, 0xa5, 0xae, 0xef, 0xad, 0xef, - 0xed, 0xf1, 0xae, 0xa2, 0x2f, 0xaa, 0xb5, 0x42, 0xf5, 0xd3, 0x12, 0x6e, 0xbe, 0x91, 0x66, 0x1f, - 0x0b, 0x52, 0x58, 0xbf, 0x4b, 0x12, 0x1e, 0x54, 0x81, 0xee, 0x40, 0xa9, 0x9d, 0xfc, 0x77, 0xc0, - 0x94, 0x5d, 0x81, 0xea, 0x67, 0x52, 0xff, 0x1d, 0x98, 0x78, 0x40, 0x1a, 0x7d, 0x0d, 0xf4, 0x24, - 0xa5, 0xe1, 0xf7, 0x02, 0x9b, 0x60, 0xcb, 0x6b, 0x92, 0xf0, 0x93, 0x84, 0x42, 0xf5, 0xf2, 0xd1, - 0xe1, 0x8a, 0xbe, 0x35, 0x42, 0x06, 0x8f, 0x1c, 0x8d, 0x28, 0x2c, 0x46, 0xe6, 0xef, 0x04, 0xd6, - 0xde, 0x9e, 0x6b, 0x9b, 0x7e, 0xdb, 0xb5, 0xfb, 0xa2, 0x87, 0x28, 0x54, 0x6f, 0xcb, 0x09, 0x2e, - 0x6e, 0x0e, 0x13, 0x3a, 0x3e, 0x5c, 0xb9, 0x2c, 0x6d, 0x1f, 0xca, 0xc7, 0xc3, 0x75, 0xa3, 0x6d, - 
0xb8, 0xd8, 0x22, 0x56, 0x9b, 0xb5, 0x6a, 0x2d, 0x62, 0xef, 0x47, 0x67, 0x48, 0x9f, 0x15, 0x67, - 0x2b, 0xda, 0xd7, 0x8b, 0xf7, 0x4f, 0x8a, 0xe0, 0x61, 0xe3, 0xd0, 0xcf, 0x35, 0x58, 0x1c, 0x58, - 0xf1, 0xf0, 0xd3, 0x15, 0xbd, 0x34, 0xd1, 0x17, 0x02, 0x8d, 0x61, 0x3a, 0xab, 0x97, 0xf8, 0x72, - 0x0c, 0x65, 0xe1, 0xe1, 0xb3, 0x40, 0x37, 0x01, 0xdc, 0xee, 0x3d, 0xab, 0xe3, 0xb6, 0x5d, 0x42, - 0xf5, 0x8b, 0x62, 0xbf, 0x96, 0xb8, 0x9f, 0xd7, 0xcd, 0x88, 0xca, 0x63, 0x93, 0x7c, 0xeb, 0xe3, - 0x84, 0x34, 0xda, 0x82, 0x92, 0x7c, 0xeb, 0xcb, 0x8d, 0x59, 0x10, 0x1b, 0xf3, 0x09, 0xd1, 0x05, - 0x9b, 0x49, 0xce, 0xf1, 0x09, 0x0a, 0x1e, 0x18, 0x8b, 0x6a, 0xb0, 0x90, 0xf4, 0x84, 0xb0, 0x22, - 0x5f, 0x14, 0x0a, 0x17, 0x79, 0x35, 0xbf, 0x35, 0xc8, 0xc4, 0x27, 0xe5, 0x91, 0x0f, 0x8b, 0xae, - 0x37, 0xcc, 0x65, 0x5e, 0x11, 0x8a, 0x5e, 0xe3, 0xeb, 0x53, 0xf7, 0x9e, 0xed, 0x2e, 0x43, 0xf9, - 0x78, 0xb8, 0xde, 0xa5, 0x5b, 0x30, 0x97, 0x8a, 0x42, 0x2f, 0x54, 0x66, 0x3d, 0xca, 0xf0, 0xd1, - 0x89, 0xcc, 0x8a, 0xbe, 0xaf, 0xc1, 0x6c, 0xd2, 0x2a, 0x99, 0x36, 0xeb, 0x67, 0xf0, 0xb7, 0x9f, - 0xcc, 0xdd, 0xea, 0xdb, 0x9e, 0x24, 0x0f, 0xa7, 0x40, 0x51, 0x6f, 0x48, 0xf3, 0xbc, 0x3e, 0x6e, - 0xe6, 0x3e, 0x75, 0xeb, 0x6c, 0x7c, 0xa8, 0xc1, 0x70, 0xe7, 0x45, 0x3e, 0xe4, 0x6d, 0xf9, 0xe1, - 0x97, 0x5c, 0x91, 0xb1, 0xbf, 0x24, 0x49, 0x7d, 0x3f, 0x16, 0x5e, 0xf8, 0x47, 0x34, 0xac, 0x40, - 0x8c, 0xbf, 0x6b, 0x90, 0x13, 0x37, 0xed, 0xe8, 0xd5, 0xc4, 0x7e, 0x56, 0x8b, 0xd2, 0x82, 0xec, - 0x5b, 0xa4, 0x1f, 0x6e, 0xee, 0x95, 0xd4, 0xe6, 0xc6, 0xd9, 0xef, 0x1d, 0x4e, 0x94, 0x7b, 0x8d, - 0xd6, 0x60, 0x9a, 0xec, 0xed, 0x11, 0x9b, 0xc9, 0xd4, 0xf3, 0x6a, 0x54, 0x3f, 0x6d, 0x0a, 0x2a, - 0x4f, 0x10, 0x02, 0x2c, 0x7c, 0xc5, 0x52, 0x98, 0xf7, 0xe5, 0xcc, 0xed, 0x90, 0x75, 0xc7, 0x21, - 0xce, 0x99, 0x5c, 0x47, 0x8a, 0x86, 0x6c, 0x27, 0x52, 0x89, 0x63, 0xed, 0xbc, 0x91, 0xbd, 0xc4, - 0x93, 0x93, 0xb3, 0xe5, 0xdb, 0x56, 0x3b, 0x2c, 0x58, 0x31, 0xd9, 0x23, 0x01, 0xf1, 0x6c, 0x82, - 0xae, 0x42, 0xde, 0xea, 0xba, 0x6f, 0x04, 0x7e, 0x2f, 0xba, 0x40, 0x14, 0xeb, 0xb6, 0x6e, 0xd6, - 0x05, 0x0d, 0x2b, 0x2e, 0x2f, 0x58, 0xf6, 0x5d, 0xcf, 0x91, 0xab, 0xa1, 0x0a, 0x96, 0xb7, 0x5c, - 0xcf, 0xc1, 0x82, 0xa3, 0xca, 0xa5, 0xec, 0xa8, 0x72, 0xc9, 0xb8, 0x03, 0xc5, 0xc4, 0x77, 0x67, - 0x3c, 0x75, 0x77, 0xf8, 0x83, 0x69, 0xb1, 0xd6, 0x60, 0xea, 0xde, 0x8e, 0x18, 0x38, 0x96, 0xa9, - 0x7e, 0xe3, 0xf1, 0xd3, 0xe5, 0x0b, 0x1f, 0x3d, 0x5d, 0xbe, 0xf0, 0xe4, 0xe9, 0xf2, 0x85, 0x0f, - 0x8e, 0x96, 0xb5, 0xc7, 0x47, 0xcb, 0xda, 0x47, 0x47, 0xcb, 0xda, 0x93, 0xa3, 0x65, 0xed, 0xe3, - 0xa3, 0x65, 0xed, 0x67, 0x7f, 0x5d, 0xbe, 0xf0, 0xf5, 0xb5, 0xb1, 0xbe, 0xd4, 0xfc, 0x6f, 0x00, - 0x00, 0x00, 0xff, 0xff, 0x3f, 0xf4, 0xc9, 0x6c, 0xe1, 0x29, 0x00, 0x00, + 0x6f, 0x0f, 0x42, 0xc3, 0x2f, 0x47, 0x33, 0x10, 0x98, 0x9d, 0xd9, 0x19, 0x26, 0x76, 0x32, 0x3b, + 0xde, 0x4d, 0x66, 0x4d, 0x39, 0x5a, 0x10, 0x1c, 0xd8, 0x4e, 0x77, 0xc5, 0xe9, 0x8d, 0xdd, 0x6d, + 0xba, 0xca, 0x99, 0xb5, 0x84, 0x60, 0x57, 0x08, 0xb4, 0xc0, 0x48, 0xf0, 0x0f, 0x70, 0xe1, 0xc6, + 0x19, 0x2e, 0x88, 0x33, 0xd2, 0x70, 0x5b, 0x6e, 0x23, 0x84, 0xc2, 0x4e, 0x90, 0xb8, 0x71, 0x40, + 0x9c, 0x08, 0x12, 0x42, 0x55, 0x5d, 0x55, 0xdd, 0xed, 0xd8, 0x33, 0x19, 0x3b, 0x52, 0xc4, 0xc9, + 0xdd, 0xef, 0xbd, 0x7a, 0x9f, 0x7a, 0x55, 0xaf, 0xde, 0x8f, 0x72, 0xc3, 0x46, 0xd3, 0x63, 0x7b, + 0xdd, 0x9d, 0xb2, 0x13, 0xb4, 0x57, 0x1c, 0xaf, 0xe5, 0x75, 0xf5, 0x4f, 0x67, 0xbf, 0xb9, 0xb2, + 0x7f, 0x83, 0xae, 0xd0, 0x96, 0xd7, 0x16, 0x0f, 0x76, 0xc7, 0x5b, 0x71, 0x82, 0x90, 0xac, 0x1c, + 0x5c, 0x5b, 0x69, 0x12, 0x9f, 0x84, 0x36, 0x23, 
0x6e, 0xb9, 0x13, 0x06, 0x2c, 0x40, 0xab, 0xb1, + 0x9a, 0x72, 0x34, 0x5e, 0xfd, 0x74, 0xf6, 0x9b, 0xe5, 0xfd, 0x1b, 0xb4, 0xcc, 0xd5, 0x88, 0x07, + 0xbb, 0xe3, 0x95, 0xb9, 0x9a, 0xf2, 0xc1, 0xb5, 0x85, 0x7b, 0x2f, 0x84, 0x4e, 0x57, 0xda, 0x84, + 0xd9, 0x03, 0xe0, 0x17, 0xbe, 0x90, 0xd0, 0xd3, 0x0c, 0x9a, 0xc1, 0x8a, 0x20, 0xef, 0x74, 0x77, + 0xc5, 0x9b, 0x78, 0x11, 0x4f, 0x52, 0x9c, 0x2b, 0x2c, 0x7b, 0x01, 0xd7, 0xd9, 0xb6, 0x9d, 0x3d, + 0xcf, 0x27, 0x61, 0x4f, 0x20, 0x86, 0x5d, 0x9f, 0x79, 0x6d, 0x72, 0x42, 0xff, 0x97, 0x9f, 0x37, + 0x80, 0x3a, 0x7b, 0xa4, 0x6d, 0xf7, 0x8f, 0xb3, 0x36, 0xa1, 0x54, 0x6d, 0x79, 0xc4, 0x67, 0xb5, + 0x7a, 0x35, 0xf0, 0x77, 0xbd, 0x26, 0xba, 0x09, 0x25, 0x3e, 0x20, 0xe8, 0xb2, 0x06, 0x71, 0x02, + 0xdf, 0xa5, 0xa6, 0xb1, 0x6c, 0x5c, 0xcd, 0x55, 0xd0, 0xd1, 0xe1, 0x52, 0x69, 0x3b, 0xc5, 0xc1, + 0x7d, 0x92, 0xd6, 0xef, 0x32, 0x50, 0xa8, 0x06, 0x3e, 0xb3, 0x39, 0x3e, 0x5a, 0x86, 0x09, 0xdf, + 0x6e, 0x13, 0x31, 0xbe, 0x50, 0x99, 0x7e, 0x7c, 0xb8, 0x74, 0xe1, 0xe8, 0x70, 0x69, 0xe2, 0x81, + 0xdd, 0x26, 0x58, 0x70, 0xd0, 0x15, 0xc8, 0x79, 0x6d, 0xbb, 0x49, 0xcc, 0x8c, 0x10, 0x99, 0x91, + 0x22, 0xb9, 0x1a, 0x27, 0xe2, 0x88, 0x87, 0x3c, 0xc8, 0x75, 0x82, 0x90, 0x51, 0x73, 0x72, 0x39, + 0x7b, 0xb5, 0x78, 0x7d, 0xbd, 0x3c, 0xd2, 0x4e, 0x96, 0xf5, 0xbc, 0xea, 0x41, 0xc8, 0x62, 0x28, + 0xfe, 0x46, 0x71, 0x84, 0x80, 0xbe, 0x0b, 0xd3, 0x07, 0x41, 0xab, 0xdb, 0x26, 0x5b, 0x41, 0xd7, + 0x67, 0xd4, 0x2c, 0x08, 0xc4, 0xca, 0x88, 0x88, 0xef, 0xc4, 0xaa, 0x2a, 0x2f, 0x4b, 0xbc, 0xe9, + 0x04, 0x91, 0xe2, 0x14, 0x9a, 0xf5, 0x1f, 0x03, 0x66, 0x52, 0xb3, 0x3c, 0xc5, 0x0a, 0x7e, 0x1e, + 0xf2, 0x7b, 0x01, 0x65, 0x5c, 0x5a, 0x2c, 0x62, 0xae, 0xf2, 0x92, 0x94, 0xca, 0xdf, 0x97, 0x74, + 0xac, 0x25, 0xd0, 0x2d, 0x98, 0x71, 0x92, 0x00, 0x66, 0x56, 0x0c, 0x99, 0x97, 0x43, 0xd2, 0xe8, + 0x38, 0x2d, 0x8b, 0x6e, 0x40, 0x5e, 0xf8, 0x8c, 0x13, 0xb4, 0xcc, 0x09, 0x31, 0xa1, 0xcb, 0x0a, + 0xaa, 0x2e, 0xe9, 0xc7, 0x89, 0x67, 0xac, 0xa5, 0xd1, 0xa7, 0x61, 0x92, 0x4f, 0xa1, 0x56, 0x37, + 0x73, 0x62, 0x5c, 0x49, 0x8e, 0x9b, 0xbc, 0x2f, 0xa8, 0x58, 0x72, 0xad, 0x1f, 0x1a, 0x50, 0xd2, + 0x53, 0x68, 0x30, 0x9b, 0x11, 0x44, 0x61, 0x2a, 0xec, 0xfa, 0xbe, 0xe7, 0x37, 0x85, 0x79, 0xc5, + 0xeb, 0x9b, 0xe3, 0x6e, 0xbf, 0xd0, 0x8b, 0x23, 0x9d, 0x95, 0xe2, 0xd1, 0xe1, 0xd2, 0x94, 0x7c, + 0xc1, 0x0a, 0xc9, 0xfa, 0x91, 0x01, 0xf3, 0x03, 0xe5, 0x51, 0x1b, 0x0a, 0x94, 0xd9, 0x21, 0x23, + 0xee, 0x1a, 0x13, 0xbb, 0x52, 0xbc, 0xfe, 0xfa, 0x8b, 0x4d, 0x88, 0x96, 0x79, 0x88, 0xe0, 0x33, + 0xe2, 0x67, 0xa9, 0x32, 0x27, 0x97, 0xa2, 0xd0, 0x50, 0x6a, 0x71, 0x8c, 0x60, 0xfd, 0xc6, 0x80, + 0xd9, 0xd4, 0x44, 0xba, 0x14, 0xbd, 0x07, 0x39, 0xca, 0xa7, 0x24, 0xd7, 0x63, 0xe3, 0x4c, 0xd6, + 0x23, 0x3e, 0x0f, 0x91, 0xb9, 0x11, 0x04, 0x5a, 0x85, 0xa2, 0xf6, 0x81, 0xda, 0xba, 0x99, 0x17, + 0xbb, 0x77, 0x51, 0x8a, 0x16, 0xab, 0x31, 0x0b, 0x27, 0xe5, 0xac, 0xaf, 0xc3, 0xec, 0x86, 0xef, + 0x76, 0x02, 0xcf, 0x67, 0x6b, 0xae, 0x1b, 0x12, 0x4a, 0xd1, 0x02, 0x64, 0xbc, 0x8e, 0xf4, 0x63, + 0x90, 0x0a, 0x32, 0xb5, 0x3a, 0xce, 0x78, 0x1d, 0x74, 0x15, 0xf2, 0x7e, 0xe0, 0x12, 0xee, 0xd5, + 0xd2, 0xb1, 0xa6, 0xb9, 0x53, 0x3d, 0x90, 0x34, 0xac, 0xb9, 0xd6, 0x23, 0x03, 0xa6, 0x95, 0xe6, + 0x53, 0x1e, 0x90, 0x65, 0x98, 0xe8, 0xc4, 0x87, 0x43, 0x4b, 0x08, 0x07, 0x17, 0x9c, 0x94, 0x5f, + 0x67, 0x5f, 0xc4, 0xaf, 0xad, 0x7f, 0x1a, 0x50, 0x52, 0xd3, 0x69, 0x74, 0x77, 0x28, 0x61, 0xe8, + 0x21, 0x14, 0xec, 0xc8, 0x64, 0xc2, 0x03, 0x27, 0x0f, 0x1f, 0xf7, 0x46, 0xdc, 0xa1, 0xbe, 0x25, + 0x8c, 0x5d, 0x65, 0x4d, 0x01, 0xe0, 0x18, 0x0b, 0xed, 0xa9, 0x28, 0x99, 
0x15, 0xa0, 0xd5, 0x31, + 0x41, 0x87, 0x07, 0x49, 0xeb, 0x1f, 0x06, 0x14, 0x94, 0x18, 0x45, 0x21, 0xe4, 0xb9, 0x43, 0xbb, + 0x36, 0xb3, 0xe5, 0x81, 0xa8, 0x8c, 0x7a, 0x20, 0xde, 0xde, 0x79, 0x8f, 0x38, 0x6c, 0x8b, 0x30, + 0xbb, 0x82, 0x24, 0x32, 0xc4, 0x34, 0xac, 0x71, 0x50, 0x07, 0xa6, 0xa8, 0x58, 0x6e, 0x6a, 0x66, + 0x84, 0xb5, 0x1b, 0x63, 0x5a, 0x1b, 0x6d, 0x5e, 0x65, 0x56, 0xa2, 0x4e, 0x45, 0xef, 0x14, 0x2b, + 0x18, 0xeb, 0x6f, 0x06, 0xcc, 0x68, 0x9b, 0x37, 0x3d, 0xca, 0x90, 0x7f, 0xc2, 0xee, 0xbb, 0xa3, + 0xda, 0xcd, 0xf5, 0x09, 0xab, 0x75, 0xe8, 0x56, 0x94, 0x84, 0xcd, 0x04, 0x72, 0x1e, 0x23, 0x6d, + 0x65, 0xf1, 0xdd, 0x31, 0x2d, 0xa6, 0x89, 0x64, 0xcb, 0xd5, 0xe2, 0x48, 0xbb, 0xf5, 0x61, 0x06, + 0x2e, 0x6e, 0x06, 0xb6, 0x5b, 0xb1, 0x5b, 0xb6, 0xef, 0x90, 0xb0, 0xe6, 0x37, 0x9f, 0x7b, 0x7e, + 0x65, 0x0e, 0x12, 0x07, 0x31, 0x4a, 0xe4, 0xa9, 0x1c, 0xe4, 0x8b, 0x33, 0xac, 0x24, 0xd0, 0x0d, + 0x98, 0xf4, 0x3a, 0x5b, 0x81, 0x4b, 0xe4, 0x61, 0x5b, 0xe6, 0x89, 0xa0, 0x56, 0xe7, 0x94, 0xe3, + 0xc3, 0x25, 0x94, 0x02, 0x17, 0x54, 0x2c, 0xe5, 0xd1, 0xae, 0x72, 0xf1, 0x09, 0xb1, 0x04, 0x6b, + 0x23, 0x2e, 0x01, 0x77, 0xe6, 0x28, 0x96, 0x0e, 0x71, 0xf0, 0x9f, 0x1a, 0x90, 0x9a, 0x86, 0x0c, + 0xbc, 0x5d, 0x98, 0xf2, 0xa2, 0xd5, 0x90, 0x07, 0xfb, 0xcd, 0x11, 0x27, 0x30, 0x60, 0x7d, 0x63, + 0xd7, 0x93, 0x04, 0xac, 0xb0, 0xac, 0xef, 0x43, 0x81, 0x87, 0x33, 0xda, 0xb1, 0x1d, 0x72, 0x1e, + 0xa7, 0x4d, 0xf8, 0xbe, 0x9e, 0xc1, 0xff, 0xb2, 0xef, 0x6b, 0x23, 0x86, 0xf8, 0xfe, 0xe3, 0x0c, + 0x4c, 0xf0, 0xa4, 0x73, 0x2e, 0x31, 0xcd, 0x86, 0x09, 0xda, 0x21, 0x8e, 0xcc, 0xea, 0x5f, 0x1d, + 0xd5, 0xc4, 0xc0, 0x25, 0x8d, 0x0e, 0x71, 0xe2, 0x44, 0xc7, 0xdf, 0xb0, 0x50, 0x8d, 0x3c, 0x98, + 0xa4, 0xc2, 0x95, 0xc5, 0xc9, 0x1b, 0xfd, 0x00, 0x09, 0x90, 0xe8, 0x00, 0xe9, 0x4a, 0x2e, 0x7a, + 0xc7, 0x12, 0xc0, 0x6a, 0x43, 0x91, 0x4b, 0xa9, 0xec, 0xff, 0x45, 0x98, 0x60, 0xbd, 0x8e, 0x4a, + 0xd3, 0x4b, 0x6a, 0x6e, 0xdb, 0xbd, 0x0e, 0x3f, 0xf3, 0xb3, 0x09, 0x51, 0x4e, 0xc2, 0x42, 0x18, + 0x7d, 0x06, 0xa6, 0x64, 0x7a, 0x93, 0x51, 0x45, 0x9f, 0x11, 0x29, 0x8b, 0x15, 0xdf, 0xfa, 0x15, + 0x77, 0xd1, 0xc0, 0x25, 0xd5, 0xc0, 0x77, 0x3d, 0xe6, 0x05, 0x3e, 0x5a, 0x4d, 0x21, 0xfe, 0x7f, + 0x1f, 0xe2, 0x5c, 0x4a, 0x38, 0x81, 0xf9, 0x9a, 0x5e, 0xa2, 0x4c, 0x6a, 0xa0, 0xb4, 0x8f, 0x4f, + 0x56, 0x0f, 0x4b, 0x9b, 0xcc, 0x8b, 0xdc, 0x90, 0xd8, 0x34, 0xf0, 0xfb, 0x8b, 0x5c, 0x2c, 0xa8, + 0x58, 0x72, 0xad, 0x3f, 0x1b, 0x20, 0x4a, 0x9b, 0x73, 0x39, 0x49, 0xef, 0xa6, 0x4f, 0xd2, 0xad, + 0x31, 0x3c, 0x60, 0xc8, 0x21, 0xfa, 0x97, 0x34, 0x8f, 0xfb, 0x1d, 0xdf, 0xc2, 0x4e, 0xe0, 0x56, + 0x6b, 0xeb, 0x58, 0x6e, 0x84, 0xde, 0xc2, 0x7a, 0x44, 0xc6, 0x8a, 0xcf, 0x8b, 0x40, 0xf9, 0x48, + 0xcd, 0xa9, 0xe5, 0xac, 0x2a, 0x02, 0xa5, 0x1c, 0xc5, 0x9a, 0x8b, 0xae, 0x03, 0x74, 0xc2, 0xe0, + 0xc0, 0x73, 0x45, 0x4d, 0x1a, 0x25, 0x11, 0x7d, 0xb6, 0xea, 0x9a, 0x83, 0x13, 0x52, 0xc8, 0x85, + 0x49, 0x5e, 0x9d, 0x32, 0x6a, 0xe6, 0x84, 0xe1, 0xaf, 0x8f, 0x68, 0xf8, 0x36, 0x57, 0x12, 0x6f, + 0xad, 0x78, 0xa5, 0x58, 0xea, 0xb6, 0xfe, 0x6d, 0x00, 0xc4, 0x87, 0x03, 0xbd, 0x0f, 0xe0, 0x28, + 0x67, 0x51, 0x49, 0x6b, 0x7d, 0x8c, 0x15, 0xd7, 0x9e, 0x17, 0x9b, 0xab, 0x49, 0x14, 0x27, 0xb0, + 0x10, 0x4d, 0x56, 0xa1, 0xb9, 0xb1, 0x9a, 0xd8, 0xc4, 0xd9, 0x7c, 0x76, 0x05, 0x6a, 0xfd, 0x3e, + 0x03, 0xd9, 0x7a, 0xe0, 0x9e, 0x4b, 0xf4, 0x7c, 0x37, 0x15, 0x3d, 0xef, 0x8c, 0x5c, 0x19, 0xb8, + 0x43, 0x83, 0xe7, 0x5e, 0x5f, 0xf0, 0xbc, 0x3b, 0x06, 0xc6, 0xb3, 0x63, 0xe7, 0x93, 0x2c, 0x4c, + 0x73, 0xb7, 0xd7, 0xb1, 0xec, 0x4b, 0xa9, 0x58, 0xb6, 0xdc, 0x17, 0xcb, 0x5e, 0x4a, 0xca, 0x9e, + 
0x4d, 0x28, 0xeb, 0xc1, 0x4c, 0xcb, 0xa6, 0xac, 0x1e, 0x06, 0x3b, 0x84, 0x77, 0xa9, 0xd2, 0xe4, + 0xf1, 0x3a, 0x5d, 0x7d, 0xc9, 0xb0, 0x99, 0x54, 0x8d, 0xd3, 0x48, 0xe8, 0x23, 0x03, 0x10, 0xa7, + 0x6c, 0x87, 0xb6, 0x4f, 0x23, 0x93, 0x3c, 0xd9, 0x16, 0x8e, 0x3b, 0x81, 0x05, 0x39, 0x01, 0xb4, + 0x79, 0x42, 0x3f, 0x1e, 0x80, 0x79, 0xda, 0x80, 0xce, 0x83, 0x5c, 0x9b, 0x50, 0x6a, 0x37, 0x89, + 0x39, 0x99, 0x0e, 0x72, 0x5b, 0x11, 0x19, 0x2b, 0xbe, 0x75, 0x05, 0x72, 0xf5, 0xc0, 0xad, 0xd5, + 0x9f, 0x55, 0x4e, 0x5b, 0x7f, 0x32, 0x80, 0x87, 0xc7, 0x73, 0xc9, 0x0f, 0xdf, 0x4e, 0xe7, 0x87, + 0x9b, 0xa3, 0x3b, 0xf9, 0x90, 0xf4, 0xf0, 0xeb, 0xac, 0x30, 0x4e, 0x64, 0x87, 0x0f, 0x0c, 0x28, + 0x79, 0xbe, 0xc7, 0xf4, 0x3d, 0x02, 0x35, 0x5f, 0x1e, 0xab, 0xc0, 0xd3, 0x8a, 0x2a, 0xaf, 0x48, + 0xf0, 0x52, 0x2d, 0xa5, 0x1f, 0xf7, 0xe1, 0x21, 0x26, 0x42, 0xb4, 0x42, 0xcf, 0x9c, 0x11, 0x7a, + 0x32, 0x3c, 0x2b, 0xe4, 0x04, 0x0e, 0x7a, 0x13, 0x10, 0x25, 0xe1, 0x81, 0xe7, 0x90, 0x35, 0xc7, + 0x09, 0xba, 0x3e, 0x13, 0x57, 0x1f, 0xd1, 0xed, 0x8a, 0xf6, 0xd2, 0xc6, 0x09, 0x09, 0x3c, 0x60, + 0x14, 0x6f, 0xbe, 0xf4, 0xe5, 0x09, 0xa4, 0x9b, 0xaf, 0x93, 0x17, 0x28, 0x68, 0x15, 0x8a, 0xbc, + 0x11, 0x7b, 0x40, 0xd8, 0xc3, 0x20, 0xdc, 0x37, 0x8b, 0xcb, 0xc6, 0xd5, 0x7c, 0x7c, 0xa1, 0x73, + 0x3f, 0x66, 0xe1, 0xa4, 0x9c, 0xf5, 0xcb, 0x1c, 0x14, 0x74, 0xe0, 0x42, 0x2b, 0x90, 0xeb, 0xec, + 0xd9, 0x54, 0x05, 0xa4, 0x4b, 0xba, 0x89, 0xe2, 0xc4, 0xe3, 0x28, 0x69, 0x8b, 0x67, 0x1c, 0xc9, + 0xa1, 0x87, 0xa9, 0x44, 0x98, 0x19, 0xeb, 0x82, 0x22, 0x19, 0xed, 0x9e, 0x9b, 0x07, 0x4f, 0x79, + 0xf1, 0x88, 0xae, 0xf0, 0xce, 0xd2, 0xad, 0xd5, 0xe5, 0x01, 0x4e, 0xb4, 0x85, 0x6e, 0xad, 0x8e, + 0x23, 0x1e, 0xaf, 0x21, 0xc4, 0x03, 0x35, 0xa7, 0xc7, 0xaa, 0x21, 0x84, 0xd2, 0x78, 0x2a, 0xe2, + 0x95, 0x62, 0xa9, 0x1b, 0x79, 0xf2, 0x86, 0x51, 0x84, 0xbd, 0xa9, 0x33, 0x08, 0x7b, 0x33, 0xfa, + 0x76, 0x51, 0x44, 0xba, 0x58, 0x3b, 0xfa, 0x99, 0x01, 0x73, 0x4e, 0xfa, 0x76, 0x91, 0x50, 0x33, + 0x3f, 0xd6, 0xa5, 0x55, 0xdf, 0x6d, 0xa5, 0x76, 0x8e, 0xb9, 0x6a, 0x3f, 0x10, 0x3e, 0x89, 0x8d, + 0x6e, 0x41, 0xfe, 0x3b, 0x01, 0xad, 0xb6, 0x6c, 0x4a, 0xcd, 0x42, 0xaa, 0x57, 0xc8, 0x7f, 0xed, + 0xed, 0x86, 0xa0, 0x1f, 0x1f, 0x2e, 0x15, 0xeb, 0x81, 0xab, 0x5e, 0xb1, 0x1e, 0x60, 0xfd, 0xd8, + 0x00, 0x88, 0x7b, 0x7b, 0x7d, 0xf1, 0x67, 0x9c, 0xea, 0xe2, 0x2f, 0xf3, 0x42, 0x17, 0xda, 0x4b, + 0x90, 0x23, 0x61, 0x18, 0x84, 0xb2, 0xfa, 0x2c, 0x70, 0x5f, 0xd9, 0xe0, 0x04, 0x1c, 0xd1, 0xad, + 0x3f, 0x4c, 0xc0, 0x64, 0x83, 0x38, 0x21, 0x61, 0xe7, 0x52, 0x0e, 0x7d, 0x0e, 0x0a, 0x5e, 0xbb, + 0xdd, 0x65, 0xf6, 0x4e, 0x8b, 0x08, 0xd7, 0xcf, 0x47, 0x6e, 0x50, 0x53, 0x44, 0x1c, 0xf3, 0x51, + 0x08, 0x13, 0x62, 0x72, 0xd1, 0xb9, 0x7c, 0x63, 0xc4, 0x8d, 0x8f, 0xac, 0x2d, 0xaf, 0xdb, 0xcc, + 0xde, 0xf0, 0x59, 0xd8, 0xd3, 0xf9, 0x7e, 0x82, 0x93, 0x7e, 0xf2, 0x97, 0xa5, 0x5c, 0xa5, 0xc7, + 0x08, 0xc5, 0x02, 0x0b, 0x7d, 0x68, 0x00, 0x50, 0x16, 0x7a, 0x7e, 0x93, 0x73, 0x65, 0x6d, 0xbc, + 0x35, 0x1e, 0x74, 0x43, 0xeb, 0x8b, 0x26, 0xa0, 0x97, 0x28, 0x66, 0xe0, 0x04, 0x28, 0x2a, 0xcb, + 0xb2, 0x2a, 0x9b, 0x8a, 0xbb, 0xaa, 0xac, 0x82, 0x48, 0x6b, 0x5c, 0x50, 0x2d, 0x7c, 0x05, 0x0a, + 0x5a, 0x39, 0x7a, 0x09, 0xb2, 0xfb, 0xa4, 0x17, 0x45, 0x40, 0xcc, 0x1f, 0xd1, 0xcb, 0x90, 0x3b, + 0xb0, 0x5b, 0xdd, 0xe8, 0x0a, 0x6c, 0x1a, 0x47, 0x2f, 0x37, 0x33, 0x37, 0x8c, 0x85, 0xdb, 0x30, + 0xdb, 0x37, 0xb7, 0xe7, 0x0d, 0x2f, 0x24, 0x86, 0x5b, 0x9f, 0x18, 0x20, 0x27, 0x73, 0x2e, 0x25, + 0xc1, 0x4e, 0xba, 0x24, 0xb8, 0x3d, 0xd6, 0x26, 0x0d, 0xa9, 0x0a, 0xfe, 0x98, 0x81, 0x29, 0x99, + 0xef, 0xce, 0xe5, 0xbc, 
0xb8, 0xa9, 0xf6, 0xa1, 0x32, 0xb2, 0x89, 0xc2, 0x82, 0xa1, 0x2d, 0x44, + 0xab, 0xaf, 0x85, 0x58, 0x1f, 0x13, 0xe7, 0xd9, 0x6d, 0xc4, 0x91, 0x01, 0x45, 0x29, 0x79, 0x2e, + 0x7e, 0xe3, 0xa4, 0xfd, 0xe6, 0xce, 0x78, 0xc6, 0x0e, 0x71, 0x9c, 0xdf, 0xc6, 0x46, 0x9e, 0xf2, + 0xff, 0xa0, 0xd1, 0x83, 0xbe, 0x4a, 0x28, 0xd9, 0xa1, 0x09, 0x45, 0xd6, 0x62, 0xe2, 0x9f, 0xd5, + 0x5c, 0xfa, 0xcf, 0xd8, 0x07, 0x92, 0x8e, 0xb5, 0x84, 0xf5, 0xa8, 0xa8, 0xe7, 0x2e, 0xca, 0xe1, + 0xa6, 0xba, 0xde, 0x36, 0xc6, 0x6a, 0xd8, 0x13, 0xcb, 0x31, 0xe4, 0x5f, 0xee, 0xef, 0x41, 0x9e, + 0x92, 0x16, 0x71, 0x58, 0x10, 0xca, 0xcd, 0xa9, 0x8f, 0xef, 0xf1, 0xe5, 0x86, 0x54, 0x19, 0x05, + 0x5f, 0x6d, 0xb8, 0x22, 0x63, 0x8d, 0x89, 0x56, 0xa0, 0xe0, 0xb4, 0xba, 0x94, 0x91, 0xb0, 0x56, + 0x97, 0xd1, 0x57, 0xdf, 0x2c, 0x54, 0x15, 0x03, 0xc7, 0x32, 0xa8, 0x0c, 0xa0, 0x5f, 0xa8, 0x89, + 0xc4, 0xed, 0x50, 0x49, 0x94, 0x7d, 0x9a, 0x8a, 0x13, 0x12, 0x68, 0x45, 0x46, 0xf6, 0xe8, 0xcf, + 0xc4, 0xff, 0xeb, 0x8b, 0xec, 0x6a, 0xd1, 0x13, 0xbd, 0xf2, 0x35, 0x28, 0x92, 0xf7, 0x19, 0x09, + 0x7d, 0xbb, 0xc5, 0x11, 0x72, 0x02, 0x61, 0x96, 0x97, 0xc4, 0x1b, 0x31, 0x19, 0x27, 0x65, 0xd0, + 0x36, 0xcc, 0x52, 0x42, 0xa9, 0x17, 0xf8, 0x6b, 0xbb, 0xbb, 0xbc, 0xab, 0xe8, 0x89, 0x6a, 0xad, + 0x50, 0xf9, 0xac, 0x84, 0x9b, 0x6d, 0xa4, 0xd9, 0xc7, 0x82, 0x14, 0xd5, 0xef, 0x92, 0x84, 0xfb, + 0x55, 0xa0, 0x3b, 0x50, 0x6a, 0xa5, 0xfe, 0x00, 0x91, 0x5d, 0x81, 0xee, 0x67, 0xd2, 0x7f, 0x8f, + 0xe0, 0x3e, 0x69, 0xf4, 0x0d, 0x30, 0x93, 0x94, 0x46, 0xd0, 0x0d, 0x1d, 0x82, 0x6d, 0xbf, 0x49, + 0xa2, 0x8f, 0x19, 0x0a, 0x95, 0xcb, 0x47, 0x87, 0x4b, 0xe6, 0xe6, 0x10, 0x19, 0x3c, 0x74, 0x34, + 0xa2, 0x30, 0xaf, 0xcc, 0xdf, 0x0e, 0xed, 0xdd, 0x5d, 0xcf, 0xa9, 0x07, 0x2d, 0xcf, 0xe9, 0x89, + 0x1e, 0xa2, 0x50, 0xb9, 0x2d, 0x27, 0x38, 0xbf, 0x31, 0x48, 0xe8, 0xf8, 0x70, 0xe9, 0xb2, 0xb4, + 0x7d, 0x20, 0x1f, 0x0f, 0xd6, 0x8d, 0xb6, 0xe0, 0xe2, 0x1e, 0xb1, 0x5b, 0x6c, 0xaf, 0xba, 0x47, + 0x9c, 0x7d, 0x75, 0x86, 0xcc, 0x69, 0x71, 0xb6, 0xd4, 0xbe, 0x5e, 0xbc, 0x7f, 0x52, 0x04, 0x0f, + 0x1a, 0x87, 0x7e, 0x61, 0xc0, 0x7c, 0xdf, 0x8a, 0x47, 0x1f, 0xbd, 0x98, 0xa5, 0xb1, 0xbe, 0x2d, + 0x68, 0x0c, 0xd2, 0x59, 0xb9, 0xc4, 0x97, 0x63, 0x20, 0x0b, 0x0f, 0x9e, 0x05, 0xba, 0x09, 0xe0, + 0x75, 0xee, 0xd9, 0x6d, 0xaf, 0xe5, 0x11, 0x6a, 0x5e, 0x14, 0xfb, 0xb5, 0xc0, 0xfd, 0xbc, 0x56, + 0x57, 0x54, 0x1e, 0x9b, 0xe4, 0x5b, 0x0f, 0x27, 0xa4, 0xd1, 0x26, 0x94, 0xe4, 0x5b, 0x4f, 0x6e, + 0xcc, 0x9c, 0xd8, 0x98, 0x4f, 0x89, 0x2e, 0xb8, 0x9e, 0xe4, 0x1c, 0x9f, 0xa0, 0xe0, 0xbe, 0xb1, + 0xa8, 0x0a, 0x73, 0x49, 0x4f, 0x88, 0x2a, 0xf2, 0x79, 0xa1, 0x70, 0x9e, 0x57, 0xf3, 0x9b, 0xfd, + 0x4c, 0x7c, 0x52, 0x1e, 0x05, 0x30, 0xef, 0xf9, 0x83, 0x5c, 0xe6, 0x15, 0xa1, 0xe8, 0x35, 0xbe, + 0x3e, 0x35, 0xff, 0xd9, 0xee, 0x32, 0x90, 0x8f, 0x07, 0xeb, 0x5d, 0xb8, 0x05, 0x33, 0xa9, 0x28, + 0xf4, 0x42, 0x65, 0xd6, 0xa3, 0x0c, 0x1f, 0x9d, 0xc8, 0xac, 0xe8, 0x07, 0x06, 0x4c, 0x27, 0xad, + 0x92, 0x69, 0xb3, 0x76, 0x06, 0x7f, 0xfb, 0xc9, 0xdc, 0xad, 0xbf, 0x0a, 0x4a, 0xf2, 0x70, 0x0a, + 0x14, 0x75, 0x07, 0x34, 0xcf, 0x6b, 0xa3, 0x66, 0xee, 0x53, 0xb7, 0xce, 0xd6, 0x47, 0x06, 0x0c, + 0x76, 0x5e, 0x14, 0x40, 0xde, 0x91, 0x9f, 0x8c, 0xc9, 0x15, 0x19, 0xf9, 0x1b, 0x94, 0xd4, 0x97, + 0x67, 0xd1, 0x85, 0xbf, 0xa2, 0x61, 0x0d, 0x62, 0xfd, 0xdd, 0x80, 0x9c, 0xb8, 0x69, 0x47, 0xaf, + 0x26, 0xf6, 0xb3, 0x52, 0x94, 0x16, 0x64, 0xdf, 0x22, 0xbd, 0x68, 0x73, 0xaf, 0xa4, 0x36, 0x37, + 0xce, 0x7e, 0xef, 0x70, 0xa2, 0xdc, 0x6b, 0xb4, 0x0a, 0x93, 0x64, 0x77, 0x97, 0x38, 0x4c, 0xa6, + 0x9e, 0x57, 0x55, 0xfd, 0xb4, 0x21, 0xa8, 0x3c, 
0x41, 0x08, 0xb0, 0xe8, 0x15, 0x4b, 0x61, 0xde, + 0x97, 0x33, 0xaf, 0x4d, 0xd6, 0x5c, 0x97, 0xb8, 0x67, 0x72, 0x1d, 0x29, 0x1a, 0xb2, 0x6d, 0xa5, + 0x12, 0xc7, 0xda, 0x79, 0x23, 0x7b, 0x89, 0x27, 0x27, 0x77, 0x33, 0x70, 0xec, 0x56, 0x54, 0xb0, + 0x62, 0xb2, 0x4b, 0x42, 0xe2, 0x3b, 0x04, 0x5d, 0x85, 0xbc, 0xdd, 0xf1, 0xde, 0x08, 0x83, 0xae, + 0xba, 0x40, 0x14, 0xeb, 0xb6, 0x56, 0xaf, 0x09, 0x1a, 0xd6, 0x5c, 0x5e, 0xb0, 0xec, 0x7b, 0xbe, + 0x2b, 0x57, 0x43, 0x17, 0x2c, 0x6f, 0x79, 0xbe, 0x8b, 0x05, 0x47, 0x97, 0x4b, 0xd9, 0x61, 0xe5, + 0x92, 0x75, 0x07, 0x8a, 0x89, 0x2f, 0xd6, 0x78, 0xea, 0x6e, 0xf3, 0x87, 0xba, 0xcd, 0xf6, 0xfa, + 0x53, 0xf7, 0x96, 0x62, 0xe0, 0x58, 0xa6, 0xf2, 0xad, 0xc7, 0x4f, 0x17, 0x2f, 0x7c, 0xfc, 0x74, + 0xf1, 0xc2, 0x93, 0xa7, 0x8b, 0x17, 0x3e, 0x38, 0x5a, 0x34, 0x1e, 0x1f, 0x2d, 0x1a, 0x1f, 0x1f, + 0x2d, 0x1a, 0x4f, 0x8e, 0x16, 0x8d, 0x4f, 0x8e, 0x16, 0x8d, 0x9f, 0xff, 0x75, 0xf1, 0xc2, 0x37, + 0x57, 0x47, 0xfa, 0xc6, 0xf3, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xc0, 0x64, 0x82, 0x76, 0x1b, + 0x2a, 0x00, 0x00, } func (m *ClientIPConfig) Marshal() (dAtA []byte, err error) { @@ -1832,6 +1834,13 @@ func (m *LoadBalancerIngress) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x22 } } + if m.IPMode != nil { + i -= len(*m.IPMode) + copy(dAtA[i:], *m.IPMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.IPMode))) + i-- + dAtA[i] = 0x1a + } i -= len(m.Hostname) copy(dAtA[i:], m.Hostname) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) @@ -3485,6 +3494,10 @@ func (m *LoadBalancerIngress) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Hostname) n += 1 + l + sovGenerated(uint64(l)) + if m.IPMode != nil { + l = len(*m.IPMode) + n += 1 + l + sovGenerated(uint64(l)) + } if len(m.Ports) > 0 { for _, e := range m.Ports { l = e.Size() @@ -4211,6 +4224,7 @@ func (this *LoadBalancerIngress) String() string { s := strings.Join([]string{`&LoadBalancerIngress{`, `IP:` + fmt.Sprintf("%v", this.IP) + `,`, `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `IPMode:` + valueToStringGenerated(this.IPMode) + `,`, `Ports:` + repeatedStringForPorts + `,`, `}`, }, "") @@ -6081,6 +6095,39 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { } m.Hostname = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IPMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := LoadBalancerIPMode(dAtA[iNdEx:postIndex]) + m.IPMode = &s + iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/generated.proto b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/generated.proto index d45e97cb6d..eb3131a2e3 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/generated.proto +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/generated.proto @@ -231,6 +231,15 @@ message LoadBalancerIngress { // +optional optional 
string hostname = 2; + // IPMode specifies how the load-balancer IP behaves, and may only be specified when the ip field is specified. + // Setting this to "VIP" indicates that traffic is delivered to the node with + // the destination set to the load-balancer's IP and port. + // Setting this to "Proxy" indicates that traffic is delivered to the node or pod with + // the destination set to the node's IP and node port or the pod's IP and port. + // Service implementations may use this information to adjust traffic routing. + // +optional + optional string ipMode = 3; + // Ports is a list of records of service ports // If used, every port defined in the service should have an entry in it // +listType=atomic @@ -420,7 +429,7 @@ message PodCondition { // PodIP represents a single IP address allocated to the pod. message PodIP { - // ip is an IP address (IPv4 or IPv6) assigned to the pod + // IP is the IP address assigned to the pod optional string ip = 1; } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/types.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/types.go index 0e2113581c..2245d3869b 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/types.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/types.go @@ -193,6 +193,9 @@ const ( // DisruptionTarget indicates the pod is about to be terminated due to a // disruption (such as preemption, eviction API or garbage-collection). DisruptionTarget PodConditionType = "DisruptionTarget" + // PodReadyToStartContainers pod sandbox is successfully configured and + // the pod is ready to launch containers. + PodReadyToStartContainers PodConditionType = "PodReadyToStartContainers" ) // These are reasons for a pod's transition to a condition. @@ -200,6 +203,22 @@ const ( // PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler // can't schedule the pod right now, for example due to insufficient resources in the cluster. PodReasonUnschedulable = "Unschedulable" + + // PodReasonSchedulingGated reason in PodScheduled PodCondition means that the scheduler + // skips scheduling the pod because one or more scheduling gates are still present. + PodReasonSchedulingGated = "SchedulingGated" + + // PodReasonSchedulerError reason in PodScheduled PodCondition means that some internal error happens + // during scheduling, for example due to nodeAffinity parsing errors. + PodReasonSchedulerError = "SchedulerError" + + // TerminationByKubelet reason in DisruptionTarget pod condition indicates that the termination + // is initiated by kubelet + PodReasonTerminationByKubelet = "TerminationByKubelet" + + // PodReasonPreemptionByScheduler reason in DisruptionTarget pod condition indicates that the + // disruption was initiated by scheduler's preemption. + PodReasonPreemptionByScheduler = "PreemptionByScheduler" ) // PodCondition contains details for the current condition of this pod. @@ -311,7 +330,7 @@ type PodSpec struct { // PodIP represents a single IP address allocated to the pod. type PodIP struct { - // ip is an IP address (IPv4 or IPv6) assigned to the pod + // IP is the IP address assigned to the pod IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` } @@ -560,6 +579,15 @@ type LoadBalancerIngress struct { // +optional Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"` + // IPMode specifies how the load-balancer IP behaves, and may only be specified when the ip field is specified. 
+ // Setting this to "VIP" indicates that traffic is delivered to the node with + // the destination set to the load-balancer's IP and port. + // Setting this to "Proxy" indicates that traffic is delivered to the node or pod with + // the destination set to the node's IP and node port or the pod's IP and port. + // Service implementations may use this information to adjust traffic routing. + // +optional + IPMode *LoadBalancerIPMode `json:"ipMode,omitempty" protobuf:"bytes,3,opt,name=ipMode"` + // Ports is a list of records of service ports // If used, every port defined in the service should have an entry in it // +listType=atomic @@ -577,6 +605,8 @@ const ( IPv4Protocol IPFamily = "IPv4" // IPv6Protocol indicates that this IP is IPv6 protocol IPv6Protocol IPFamily = "IPv6" + // IPFamilyUnknown indicates that this IP is unknown protocol + IPFamilyUnknown IPFamily = "" ) // IPFamilyPolicy represents the dual-stack-ness requested or required by a Service @@ -1382,3 +1412,15 @@ type PortStatus struct { // +kubebuilder:validation:MaxLength=316 Error *string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"` } + +// LoadBalancerIPMode represents the mode of the LoadBalancer ingress IP +type LoadBalancerIPMode string + +const ( + // LoadBalancerIPModeVIP indicates that traffic is delivered to the node with + // the destination set to the load-balancer's IP and port. + LoadBalancerIPModeVIP LoadBalancerIPMode = "VIP" + // LoadBalancerIPModeProxy indicates that traffic is delivered to the node or pod with + // the destination set to the node's IP and port or the pod's IP and port. + LoadBalancerIPModeProxy LoadBalancerIPMode = "Proxy" +) diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/zz_generated.deepcopy.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/zz_generated.deepcopy.go index 62e4787571..182db51603 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/zz_generated.deepcopy.go @@ -285,6 +285,11 @@ func (in *EndpointsList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *LoadBalancerIngress) DeepCopyInto(out *LoadBalancerIngress) { *out = *in + if in.IPMode != nil { + in, out := &in.IPMode, &out.IPMode + *out = new(LoadBalancerIPMode) + **out = **in + } if in.Ports != nil { in, out := &in.Ports, &out.Ports *out = make([]PortStatus, len(*in)) diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/zz_generated.deepequal.go index ce6e352f0e..aed723500b 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/zz_generated.deepequal.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/zz_generated.deepequal.go @@ -340,6 +340,14 @@ func (in *LoadBalancerIngress) DeepEqual(other *LoadBalancerIngress) bool { if in.Hostname != other.Hostname { return false } + if (in.IPMode == nil) != (other.IPMode == nil) { + return false + } else if in.IPMode != nil { + if *in.IPMode != *other.IPMode { + return false + } + } + if ((in.Ports != nil) && (other.Ports != nil)) || ((in.Ports == nil) != (other.Ports == nil)) { in, other := &in.Ports, &other.Ports if other == nil { diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/generated.proto b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/generated.proto index 574650a212..49ca0e991f 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/generated.proto +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/generated.proto @@ -103,8 +103,6 @@ message LabelSelector { // relates the key and values. message LabelSelectorRequirement { // key is the label key that the selector applies to. - // +patchMergeKey=key - // +patchStrategy=merge optional string key = 1; // operator represents a key's relationship to a set of values. diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/types.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/types.go index a07fb86620..728b073128 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/types.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/types.go @@ -253,9 +253,7 @@ type MatchLabelsValue = string // relates the key and values. type LabelSelectorRequirement struct { // key is the label key that the selector applies to. - // +patchMergeKey=key - // +patchStrategy=merge - Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"` + Key string `json:"key" protobuf:"bytes,1,opt,name=key"` // operator represents a key's relationship to a set of values. // Valid operators are In, NotIn, Exists and DoesNotExist. // diff --git a/vendor/github.com/cilium/cilium/pkg/labels/cidr.go b/vendor/github.com/cilium/cilium/pkg/labels/cidr.go index 557621ee20..611ff5512d 100644 --- a/vendor/github.com/cilium/cilium/pkg/labels/cidr.go +++ b/vendor/github.com/cilium/cilium/pkg/labels/cidr.go @@ -21,9 +21,7 @@ import ( // // For IPv6 addresses, it converts ":" into "-" as EndpointSelectors don't // support colons inside the name section of a label. 
-func maskedIPToLabel(ip netip.Addr, prefix int) Label { - ipStr := ip.String() - +func maskedIPToLabel(ipStr string, prefix int) Label { var str strings.Builder str.Grow( 1 /* preZero */ + @@ -70,13 +68,13 @@ func IPStringToLabel(ip string) (Label, error) { if err != nil { return Label{}, fmt.Errorf("%q is not an IP address: %w", ip, err) } - return maskedIPToLabel(parsedIP, parsedIP.BitLen()), nil + return maskedIPToLabel(ip, parsedIP.BitLen()), nil } else { parsedPrefix, err := netip.ParsePrefix(ip) if err != nil { return Label{}, fmt.Errorf("%q is not a CIDR: %w", ip, err) } - return maskedIPToLabel(parsedPrefix.Masked().Addr(), parsedPrefix.Bits()), nil + return maskedIPToLabel(parsedPrefix.Masked().Addr().String(), parsedPrefix.Bits()), nil } } @@ -176,7 +174,7 @@ func computeCIDRLabels(cache *simplelru.LRU[netip.Prefix, []Label], lbls Labels, } // Compute the label for this prefix (e.g. "cidr:10.0.0.0/8") - prefixLabel := maskedIPToLabel(prefix.Addr(), ones) + prefixLabel := maskedIPToLabel(prefix.Addr().String(), ones) lbls[prefixLabel.Key] = prefixLabel // Keep computing the rest (e.g. "cidr:10.0.0.0/7", ...). diff --git a/vendor/github.com/cilium/cilium/pkg/labels/labels.go b/vendor/github.com/cilium/cilium/pkg/labels/labels.go index 5279d83449..50f6c78275 100644 --- a/vendor/github.com/cilium/cilium/pkg/labels/labels.go +++ b/vendor/github.com/cilium/cilium/pkg/labels/labels.go @@ -115,6 +115,9 @@ const ( // LabelSourceContainer is a label imported from the container runtime LabelSourceContainer = "container" + // LabelSourceCNI is a label imported from the CNI plugin + LabelSourceCNI = "cni" + // LabelSourceReserved is the label source for reserved types. LabelSourceReserved = "reserved" diff --git a/vendor/github.com/cilium/cilium/pkg/labels/oplabels.go b/vendor/github.com/cilium/cilium/pkg/labels/oplabels.go index f05f9f0455..96f394f462 100644 --- a/vendor/github.com/cilium/cilium/pkg/labels/oplabels.go +++ b/vendor/github.com/cilium/cilium/pkg/labels/oplabels.go @@ -116,22 +116,22 @@ func (o *OpLabels) AllLabels() Labels { return all } -func (o *OpLabels) ReplaceInformationLabels(l Labels, logger *logrus.Entry) bool { +func (o *OpLabels) ReplaceInformationLabels(sourceFilter string, l Labels, logger *logrus.Entry) bool { changed := false keepers := make(keepMarks) for _, v := range l { keepers.set(v.Key) - if o.OrchestrationInfo.upsertLabel(v) { + if o.OrchestrationInfo.upsertLabel(sourceFilter, v) { changed = true logger.WithField(logfields.Object, logfields.Repr(v)).Debug("Assigning information label") } } - o.OrchestrationInfo.deleteUnMarked(keepers) + o.OrchestrationInfo.deleteUnMarked(sourceFilter, keepers) return changed } -func (o *OpLabels) ReplaceIdentityLabels(l Labels, logger *logrus.Entry) bool { +func (o *OpLabels) ReplaceIdentityLabels(sourceFilter string, l Labels, logger *logrus.Entry) bool { changed := false keepers := make(keepMarks) @@ -141,13 +141,13 @@ func (o *OpLabels) ReplaceIdentityLabels(l Labels, logger *logrus.Entry) bool { // A disabled identity label stays disabled without value updates if _, found := o.Disabled[k]; found { disabledKeepers.set(k) - } else if keepers.set(v.Key); o.OrchestrationIdentity.upsertLabel(v) { + } else if keepers.set(v.Key); o.OrchestrationIdentity.upsertLabel(sourceFilter, v) { logger.WithField(logfields.Object, logfields.Repr(v)).Debug("Assigning security relevant label") changed = true } } - if o.OrchestrationIdentity.deleteUnMarked(keepers) || o.Disabled.deleteUnMarked(disabledKeepers) { + if 
o.OrchestrationIdentity.deleteUnMarked(sourceFilter, keepers) || o.Disabled.deleteUnMarked(sourceFilter, disabledKeepers) { changed = true } @@ -201,25 +201,40 @@ func (o *OpLabels) ModifyIdentityLabels(addLabels, delLabels Labels) (changed bo // upsertLabel updates or inserts 'label' in 'l', but only if exactly the same label // was not already in 'l'. Returns 'true' if a label was added, or an old label was // updated, 'false' otherwise. -func (l Labels) upsertLabel(label Label) bool { +// The label is only updated if its source matches the provided 'sourceFilter' +// or in case the provided sourceFilter is 'LabelSourceAny'. The new label must +// also match the old label 'source' in order for it to be replaced. +func (l Labels) upsertLabel(sourceFilter string, label Label) bool { oldLabel, found := l[label.Key] if found { + if sourceFilter != LabelSourceAny && sourceFilter != oldLabel.Source { + return false + } + // Key is the same, check if Value and Source are also the same if label.Value == oldLabel.Value && label.Source == oldLabel.Source { return false // No change } + + // If the label is not from the same source, then don't replace it. + if oldLabel.Source != label.Source { + return false + } } + // Insert or replace old label l[label.Key] = label return true } // deleteUnMarked deletes the labels which have not been marked for keeping. +// The labels are only deleted if their source matches the provided sourceFilter +// or in case the provided sourceFilter is 'LabelSourceAny'. // Returns true if any of them were deleted. -func (l Labels) deleteUnMarked(marks keepMarks) bool { +func (l Labels) deleteUnMarked(sourceFilter string, marks keepMarks) bool { deleted := false - for k := range l { - if _, keep := marks[k]; !keep { + for k, v := range l { + if _, keep := marks[k]; !keep && (sourceFilter == LabelSourceAny || sourceFilter == v.Source) { delete(l, k) deleted = true } diff --git a/vendor/github.com/cilium/cilium/pkg/lock/map.go b/vendor/github.com/cilium/cilium/pkg/lock/map.go new file mode 100644 index 0000000000..f54702a494 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/lock/map.go @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package lock + +import "sync" + +// Map is a thin generic wrapper around sync.Map. The sync.Map description from +// the standard library follows (and is also propagated to the corresponding +// methods) for users' convenience: +// +// Map is like a Go map[interface{}]interface{} but is safe for concurrent use +// by multiple goroutines without additional locking or coordination. +// Loads, stores, and deletes run in amortized constant time. +// +// The Map type is specialized. Most code should use a plain Go map instead, +// with separate locking or coordination, for better type safety and to make it +// easier to maintain other invariants along with the map content. +// +// The Map type is optimized for two common use cases: (1) when the entry for a given +// key is only ever written once but read many times, as in caches that only grow, +// or (2) when multiple goroutines read, write, and overwrite entries for disjoint +// sets of keys. In these two cases, use of a Map may significantly reduce lock +// contention compared to a Go map paired with a separate Mutex or RWMutex. +// +// The zero Map is empty and ready for use. A Map must not be copied after first use. 
+type Map[K comparable, V any] sync.Map + +// MapCmpValues is an extension of Map, which additionally wraps the two extra +// methods requiring values to be also of comparable type. +type MapCmpValues[K, V comparable] Map[K, V] + +// Load returns the value stored in the map for a key, or the zero value if no +// value is present. The ok result indicates whether value was found in the map. +func (m *Map[K, V]) Load(key K) (value V, ok bool) { + val, ok := (*sync.Map)(m).Load(key) + return m.convert(val, ok) +} + +// LoadOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. +// The loaded result is true if the value was loaded, false if stored. +func (m *Map[K, V]) LoadOrStore(key K, value V) (actual V, loaded bool) { + val, loaded := (*sync.Map)(m).LoadOrStore(key, value) + return val.(V), loaded +} + +// LoadAndDelete deletes the value for a key, returning the previous value if any +// (zero value otherwise). The loaded result reports whether the key was present. +func (m *Map[K, V]) LoadAndDelete(key K) (value V, loaded bool) { + val, loaded := (*sync.Map)(m).LoadAndDelete(key) + return m.convert(val, loaded) +} + +// Store sets the value for a key. +func (m *Map[K, V]) Store(key K, value V) { + (*sync.Map)(m).Store(key, value) +} + +// Swap swaps the value for a key and returns the previous value if any (zero +// value otherwise). The loaded result reports whether the key was present. +func (m *Map[K, V]) Swap(key K, value V) (previous V, loaded bool) { + val, loaded := (*sync.Map)(m).Swap(key, value) + return m.convert(val, loaded) +} + +// Delete deletes the value for a key. +func (m *Map[K, V]) Delete(key K) { + (*sync.Map)(m).Delete(key) +} + +// Range calls f sequentially for each key and value present in the map. +// If f returns false, range stops the iteration. +// +// Range does not necessarily correspond to any consistent snapshot of the Map's +// contents: no key will be visited more than once, but if the value for any key +// is stored or deleted concurrently (including by f), Range may reflect any +// mapping for that key from any point during the Range call. Range does not +// block other methods on the receiver; even f itself may call any method on m. +// +// Range may be O(N) with the number of elements in the map even if f returns +// false after a constant number of calls. +func (m *Map[K, V]) Range(f func(key K, value V) bool) { + (*sync.Map)(m).Range(func(key, value any) bool { + return f(key.(K), value.(V)) + }) +} + +// CompareAndDelete deletes the entry for key if its value is equal to old. +// If there is no current value for key in the map, CompareAndDelete returns false +// (even if the old value is the nil interface value). +func (m *MapCmpValues[K, V]) CompareAndDelete(key K, old V) (deleted bool) { + return (*sync.Map)(m).CompareAndDelete(key, old) +} + +// CompareAndSwap swaps the old and new values for key if the value stored in +// the map is equal to old. 
+func (m *MapCmpValues[K, V]) CompareAndSwap(key K, old, new V) bool { + return (*sync.Map)(m).CompareAndSwap(key, old, new) +} + +func (m *Map[K, V]) convert(value any, ok bool) (V, bool) { + if !ok { + return *new(V), false + } + + return value.(V), true +} diff --git a/vendor/github.com/cilium/cilium/pkg/logging/logfields/logfields.go b/vendor/github.com/cilium/cilium/pkg/logging/logfields/logfields.go index 436522029b..f4810989be 100644 --- a/vendor/github.com/cilium/cilium/pkg/logging/logfields/logfields.go +++ b/vendor/github.com/cilium/cilium/pkg/logging/logfields/logfields.go @@ -53,8 +53,8 @@ const ( // Labels are any label, they may not be relevant to the security identity. Labels = "labels" - // Source is the label or node information source - Source = "source" + // SourceFilter is the label or node information source + SourceFilter = "sourceFilter" // Controller is the name of the controller to log it. Controller = "controller" diff --git a/vendor/github.com/cilium/cilium/pkg/logging/logging.go b/vendor/github.com/cilium/cilium/pkg/logging/logging.go index ccb7fb1353..2c8f1351ac 100644 --- a/vendor/github.com/cilium/cilium/pkg/logging/logging.go +++ b/vendor/github.com/cilium/cilium/pkg/logging/logging.go @@ -41,7 +41,7 @@ const ( // DefaultLogger is the base logrus logger. It is different from the logrus // default to avoid external dependencies from writing out unexpectedly -var DefaultLogger = InitializeDefaultLogger() +var DefaultLogger = initializeDefaultLogger() func initializeKLog() { log := DefaultLogger.WithField(logfields.LogSubsys, "klog") @@ -73,8 +73,9 @@ func initializeKLog() { // LogOptions maps configuration key-value pairs related to logging. type LogOptions map[string]string -// InitializeDefaultLogger returns a logrus Logger with a custom text formatter. -func InitializeDefaultLogger() (logger *logrus.Logger) { +// initializeDefaultLogger returns a logrus Logger with the default logging +// settings. +func initializeDefaultLogger() (logger *logrus.Logger) { logger = logrus.New() logger.SetFormatter(GetFormatter(DefaultLogFormat)) logger.SetLevel(DefaultLogLevel) diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/metric/collections/product.go b/vendor/github.com/cilium/cilium/pkg/metrics/metric/collections/product.go new file mode 100644 index 0000000000..28c24f7f24 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/metrics/metric/collections/product.go @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package collections + +// CartesianProduct returns the cartesian product of the input vectors as +// a vector of vectors, each with length the same as the number of input vectors. +func CartesianProduct[T any](vs ...[]T) [][]T { + if len(vs) == 0 { + return [][]T{} + } + + dimension := len(vs) // Each output will be a vector of this length. + // Iterate to find out the number of output vectors. + size := len(vs[0]) + for i := 1; i < len(vs); i++ { + size *= len(vs[i]) + } + + // Allocate the output vectors. + dst := make([][]T, size) + for i := range dst { + dst[i] = make([]T, dimension) + } + + lastm := 1 + for i := 0; i < dimension; i++ { + permuteColumn[T](dst, i, lastm, vs[i]) + lastm = lastm * len(vs[i]) + } + return dst +} + +// permuteColumn fills in the nth column of the output vectors of the cartesian +// product of the input vectors. +// +// leftPermSize is the number of vectors as a result of permuting 0,..,col-1 columns. 
+// That is, this is the block size upon which we will repeat the values of v0 such that +// every previous permutation is again permuted with each value of v0. +// +// For ex. +// CartesianProduct[string]({"a", "b"}, {"x", "y", "z"}) +// +// Iteration (i.e. col, leftPermSize=1) 1: +// +// dst = [ +// ["a"], +// ["b"], +// ["a"] +// ["b"] +// ["a"] +// ["b"] +// ] +// +// Iteration (leftPermSize=2): +// +// dst = [ +// ["a", "x"], // <- each elem of vec is repeated leftPermSize times. +// ["b", "x"], +// ["a", "y"] +// ["b", "y"] +// ["a", "z"] +// ["b", "z"] +// ] +func permuteColumn[T any](dst [][]T, col int, leftPermSize int, vec []T) { + // Go down the column with the current lhs. + // You want to skip along, lastm elements at a time. + for i := 0; i < len(dst); i += leftPermSize { // So we're skipping n rows at a time, + vi := (i / leftPermSize) % len(vec) + for off := 0; off < leftPermSize; off++ { // this is a repeat + dst[i+off][col] = vec[vi] + } + } +} diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/metric/counter.go b/vendor/github.com/cilium/cilium/pkg/metrics/metric/counter.go index 4755a468dc..bcbf938c6e 100644 --- a/vendor/github.com/cilium/cilium/pkg/metrics/metric/counter.go +++ b/vendor/github.com/cilium/cilium/pkg/metrics/metric/counter.go @@ -60,7 +60,9 @@ func (c *counter) Add(val float64) { } } -func NewCounterVec(opts CounterOpts, labelNames []string) DeletableVec[Counter] { +// NewCounterVec creates a new DeletableVec[Counter] based on the provided CounterOpts and +// partitioned by the given label names. +func NewCounterVec(opts CounterOpts, labelNames []string) *counterVec { return &counterVec{ CounterVec: prometheus.NewCounterVec(opts.toPrometheus(), labelNames), metric: metric{ @@ -70,12 +72,47 @@ func NewCounterVec(opts CounterOpts, labelNames []string) DeletableVec[Counter] } } +// NewCounterVecWithLabels creates a new DeletableVec[Counter] based on the provided CounterOpts and +// partitioned by the given labels. +// This will also initialize the labels with the provided values so that metrics with known label value +// ranges can be pre-initialized to zero upon init. +// +// This should only be used when all label values are known at init, otherwise use of the +// metric vector with uninitialized labels will result in warnings. +// +// Note: Disabled metrics will not have their label values initialized. 
+// +// For example: +// +// NewCounterVecWithLabels(CounterOpts{ +// Namespace: "cilium", +// Subsystem: "subsystem", +// Name: "cilium_test", +// Disabled: false, +// }, Labels{ +// {Name: "foo", Values: NewValues("0", "1")}, +// {Name: "bar", Values: NewValues("a", "b")}, +// }) +// +// Will initialize the following metrics to: +// +// cilium_subsystem_cilium_test{foo="0", bar="a"} 0 +// cilium_subsystem_cilium_test{foo="0", bar="b"} 0 +// cilium_subsystem_cilium_test{foo="1", bar="a"} 0 +// cilium_subsystem_cilium_test{foo="1", bar="b"} 0 +func NewCounterVecWithLabels(opts CounterOpts, labels Labels) *counterVec { + cv := NewCounterVec(opts, labels.labelNames()) + initLabels[Counter](&cv.metric, labels, cv, opts.Disabled) + return cv +} + type counterVec struct { *prometheus.CounterVec metric } func (cv *counterVec) CurryWith(labels prometheus.Labels) (Vec[Counter], error) { + cv.checkLabels(labels) vec, err := cv.CounterVec.CurryWith(labels) if err == nil { return &counterVec{CounterVec: vec, metric: cv.metric}, nil @@ -118,6 +155,7 @@ func (cv *counterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { } func (cv *counterVec) With(labels prometheus.Labels) Counter { + cv.checkLabels(labels) if !cv.enabled { return &counter{ metric: metric{enabled: false}, @@ -132,6 +170,7 @@ func (cv *counterVec) With(labels prometheus.Labels) Counter { } func (cv *counterVec) WithLabelValues(lvs ...string) Counter { + cv.checkLabelValues(lvs...) if !cv.enabled { return &counter{ metric: metric{enabled: false}, diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/metric/gauge.go b/vendor/github.com/cilium/cilium/pkg/metrics/metric/gauge.go index 445afde06d..083caef554 100644 --- a/vendor/github.com/cilium/cilium/pkg/metrics/metric/gauge.go +++ b/vendor/github.com/cilium/cilium/pkg/metrics/metric/gauge.go @@ -95,14 +95,51 @@ func (g *gauge) SetToCurrentTime() { } } -func NewGaugeVec(opts GaugeOpts, labelNames []string) DeletableVec[Gauge] { - return &gaugeVec{ +// NewGaugeVec creates a new DeletableVec[Gauge] based on the provided GaugeOpts and +// partitioned by the given label names. +func NewGaugeVec(opts GaugeOpts, labelNames []string) *gaugeVec { + gv := &gaugeVec{ GaugeVec: prometheus.NewGaugeVec(opts.toPrometheus(), labelNames), metric: metric{ enabled: !opts.Disabled, opts: Opts(opts), }, } + return gv +} + +// NewGaugeVecWithLabels creates a new DeletableVec[Gauge] based on the provided CounterOpts and +// partitioned by the given labels. +// This will also initialize the labels with the provided values so that metrics with known label value +// ranges can be pre-initialized to zero upon init. +// +// This should only be used when all label values are known at init, otherwise use of the +// metric vector with uninitialized labels will result in warnings. +// +// Note: Disabled metrics will not have their label values initialized. 
+// +// For example: +// +// NewGaugeVecWithLabels(GaugeOpts{ +// Namespace: "cilium", +// Subsystem: "subsystem", +// Name: "cilium_test", +// Disabled: false, +// }, Labels{ +// {Name: "foo", Values: NewValues("0", "1")}, +// {Name: "bar", Values: NewValues("a", "b")}, +// }) +// +// Will initialize the following metrics to: +// +// cilium_subsystem_cilium_test{foo="0", bar="a"} 0 +// cilium_subsystem_cilium_test{foo="0", bar="b"} 0 +// cilium_subsystem_cilium_test{foo="1", bar="a"} 0 +// cilium_subsystem_cilium_test{foo="1", bar="b"} 0 +func NewGaugeVecWithLabels(opts GaugeOpts, labels Labels) *gaugeVec { + gv := NewGaugeVec(opts, labels.labelNames()) + initLabels[Gauge](&gv.metric, labels, gv, opts.Disabled) + return gv } type gaugeVec struct { @@ -111,6 +148,7 @@ type gaugeVec struct { } func (gv *gaugeVec) CurryWith(labels prometheus.Labels) (Vec[Gauge], error) { + gv.checkLabels(labels) vec, err := gv.GaugeVec.CurryWith(labels) if err == nil { return &gaugeVec{GaugeVec: vec, metric: gv.metric}, nil @@ -158,6 +196,7 @@ func (gv *gaugeVec) With(labels prometheus.Labels) Gauge { metric: metric{enabled: false}, } } + gv.checkLabels(labels) promGauge := gv.GaugeVec.With(labels) return &gauge{ @@ -167,6 +206,7 @@ func (gv *gaugeVec) With(labels prometheus.Labels) Gauge { } func (gv *gaugeVec) WithLabelValues(lvs ...string) Gauge { + gv.checkLabelValues(lvs...) if !gv.enabled { return &gauge{ metric: metric{enabled: false}, diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/metric/histogram.go b/vendor/github.com/cilium/cilium/pkg/metrics/metric/histogram.go index f1ddb526a2..6d499707d0 100644 --- a/vendor/github.com/cilium/cilium/pkg/metrics/metric/histogram.go +++ b/vendor/github.com/cilium/cilium/pkg/metrics/metric/histogram.go @@ -71,7 +71,9 @@ func (o *observer) Observe(val float64) { } } -func NewHistogramVec(opts HistogramOpts, labelNames []string) Vec[Observer] { +// NewHistogramVec creates a new Vec[Observer] (i.e. Histogram Vec) based on the provided HistogramOpts and +// partitioned by the given label names. +func NewHistogramVec(opts HistogramOpts, labelNames []string) *histogramVec { return &histogramVec{ ObserverVec: prometheus.NewHistogramVec(opts.toPrometheus(), labelNames), metric: metric{ @@ -81,12 +83,28 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) Vec[Observer] { } } +// NewHistogramVec creates a new Vec[Observer] based on the provided CounterOpts and +// partitioned by the given labels. +// This will also initialize the labels with the provided values so that metrics with known label value +// ranges can be pre-initialized to zero upon init. +// +// This should only be used when all label values are known at init, otherwise use of the +// metric vector with uninitialized labels will result in warnings. +// +// Note: Disabled metrics will not have their label values initialized. 
+func NewHistogramVecWithLabels(opts HistogramOpts, labels Labels) *histogramVec { + hv := NewHistogramVec(opts, labels.labelNames()) + initLabels(&hv.metric, labels, hv, opts.Disabled) + return hv +} + type histogramVec struct { prometheus.ObserverVec metric } func (cv *histogramVec) CurryWith(labels prometheus.Labels) (Vec[Observer], error) { + cv.checkLabels(labels) vec, err := cv.ObserverVec.CurryWith(labels) if err == nil { return &histogramVec{ObserverVec: vec, metric: cv.metric}, nil @@ -134,6 +152,7 @@ func (cv *histogramVec) With(labels prometheus.Labels) Observer { metric: metric{enabled: false}, } } + cv.checkLabels(labels) promObserver := cv.ObserverVec.With(labels) return &observer{ @@ -148,6 +167,7 @@ func (cv *histogramVec) WithLabelValues(lvs ...string) Observer { metric: metric{enabled: false}, } } + cv.checkLabelValues(lvs...) promObserver := cv.ObserverVec.WithLabelValues(lvs...) return &observer{ diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/metric/metric.go b/vendor/github.com/cilium/cilium/pkg/metrics/metric/metric.go index 6ad520e0c6..c08e9aae4a 100644 --- a/vendor/github.com/cilium/cilium/pkg/metrics/metric/metric.go +++ b/vendor/github.com/cilium/cilium/pkg/metrics/metric/metric.go @@ -4,9 +4,18 @@ package metric import ( + "fmt" + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus" + "golang.org/x/exp/maps" + + "github.com/cilium/cilium/pkg/logging/logfields" + "github.com/cilium/cilium/pkg/metrics/metric/collections" ) +var logger = logrus.WithField(logfields.LogSubsys, "metric") + // WithMetadata is the interface implemented by any metric defined in this package. These typically embed existing // prometheus metric types and add additional metadata. In addition, these metrics have the concept of being enabled // or disabled which is used in place of conditional registration so all metric types can always be registered. @@ -20,6 +29,44 @@ type WithMetadata interface { type metric struct { enabled bool opts Opts + labels *labelSet +} + +// forEachLabelVector performs a product of all possible label value combinations +// and calls the provided function for each combination. +func (b *metric) forEachLabelVector(fn func(lvls []string)) { + if b.labels == nil { + return + } + var labelValues [][]string + for _, label := range b.labels.lbls { + labelValues = append(labelValues, maps.Keys(label.Values)) + } + for _, labelVector := range collections.CartesianProduct(labelValues...) { + fn(labelVector) + } +} + +// checkLabelValues checks that the provided label values are within the range +// of provided label values, if labels where defined using the Labels type. +// Violations are logged as errors for detection, but metrics should still +// be collected as is. +func (b *metric) checkLabelValues(lvs ...string) { + if b.labels == nil { + return + } + if err := b.labels.checkLabelValues(lvs); err != nil { + logger.WithError(err).Error("metric label constraints violated") + } +} + +func (b *metric) checkLabels(labels prometheus.Labels) { + if b.labels == nil { + return + } + if err := b.labels.checkLabels(labels); err != nil { + logger.WithError(err).Error("metric label constraints violated") + } } func (b *metric) IsEnabled() bool { @@ -196,3 +243,90 @@ func (b Opts) GetConfigName() string { } return b.ConfigName } + +// Label represents a metric label with a pre-defined range of values. 
+// This is used with the NewxxxVecWithLabels metrics constructors to initialize +// vector metrics with known label value ranges, avoiding empty metrics. +type Label struct { + Name string + // If defined, only these values are allowed. + Values Values +} + +// Values is a distinct set of possible label values for a particular Label. +type Values map[string]struct{} + +// NewValues constructs a Values type from a set of strings. +func NewValues(vs ...string) Values { + vals := Values{} + for _, v := range vs { + vals[v] = struct{}{} + } + return vals +} + +// Labels is a slice of labels that represents a label set for a vector type +// metric. +type Labels []Label + +func (lbls Labels) labelNames() []string { + lns := make([]string, len(lbls)) + for i, label := range lbls { + lns[i] = label.Name + } + return lns +} + +type labelSet struct { + lbls Labels + m map[string]map[string]struct{} +} + +func (l *labelSet) namesToValues() map[string]map[string]struct{} { + if l.m != nil { + return l.m + } + l.m = make(map[string]map[string]struct{}) + for _, label := range l.lbls { + l.m[label.Name] = label.Values + } + return l.m +} + +func (l *labelSet) checkLabels(labels prometheus.Labels) error { + for name, value := range labels { + if lvs, ok := l.namesToValues()[name]; ok { + if _, ok := lvs[value]; !ok { + return fmt.Errorf("value %s not allowed for label %s (should be one of %v)", value, name, lvs) + } + } else { + return fmt.Errorf("invalid label name: %s", name) + } + } + return nil +} + +func (l *labelSet) checkLabelValues(lvs []string) error { + if len(l.lbls) != len(lvs) { + return fmt.Errorf("invalid labels") + } + for i, label := range l.lbls { + if _, ok := label.Values[lvs[i]]; !ok { + return fmt.Errorf("invalid label value") + } + } + return nil +} + +// initLabels is a helper function to initialize the labels of a metric. +// It is used by xxxVecWithLabels metrics constructors to initialize the +// labels of the metric and the vector (i.e. registering all possible label value combinations). +func initLabels[T any](m *metric, labels Labels, vec Vec[T], disabled bool) { + if disabled { + return + } + m.labels = &labelSet{lbls: labels} + m.forEachLabelVector(func(vs []string) { + vec.WithLabelValues(vs...) + }) +} diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/metrics.go b/vendor/github.com/cilium/cilium/pkg/metrics/metrics.go index 45fef9c201..8ff4ae1c97 100644 --- a/vendor/github.com/cilium/cilium/pkg/metrics/metrics.go +++ b/vendor/github.com/cilium/cilium/pkg/metrics/metrics.go @@ -20,6 +20,7 @@ import ( "github.com/cilium/cilium/api/v1/models" "github.com/cilium/cilium/pkg/metrics/metric" "github.com/cilium/cilium/pkg/promise" + "github.com/cilium/cilium/pkg/source" "github.com/cilium/cilium/pkg/time" "github.com/cilium/cilium/pkg/version" ) @@ -96,12 +97,25 @@ const ( // Labels + // LabelValueFalse is the string value for true metric label values. + LabelValueTrue = "true" + + // LabelValueFalse is the string value for false metric label values. + LabelValueFalse = "false" + // LabelValueOutcomeSuccess is used as a successful outcome of an operation LabelValueOutcomeSuccess = "success" // LabelValueOutcomeFail is used as an unsuccessful outcome of an operation LabelValueOutcomeFail = "fail" + // LabelValueOutcomeFailure is used as an unsuccessful outcome of an operation. + // NOTE: This should only be used for existing metrics, new metrics should use LabelValueOutcomeFail. 
+ LabelValueOutcomeFailure = "failure" + + // LabelDropReason is used to describe reason for dropping a packets/bytes + LabelDropReason = "reason" + // LabelEventSourceAPI marks event-related metrics that come from the API LabelEventSourceAPI = "api" @@ -142,6 +156,8 @@ const ( // LabelPolicySource is the label used to see the enforcement status LabelPolicySource = "source" + LabelSource = "source" + // LabelScope is the label used to defined multiples scopes in the same // metric. For example, one counter may measure a metric over the scope of // the entire event (scope=global), or just part of an event @@ -221,6 +237,12 @@ const ( LabelLocationRemoteIntraCluster = "remote_intra_cluster" LabelLocationRemoteInterCluster = "remote_inter_cluster" + // Rule label is a label for a L7 rule name. + LabelL7Rule = "rule" + + // LabelL7ProxyType is the label for denoting a L7 proxy type. + LabelL7ProxyType = "proxy_type" + // LabelType is the label for type in general (e.g. endpoint, node) LabelType = "type" LabelPeerEndpoint = "endpoint" @@ -235,6 +257,9 @@ const ( ) var ( + // LabelValuesBool is metric label value set for boolean type. + LabelValuesBool = metric.NewValues(LabelValueTrue, LabelValueFalse) + // Namespace is used to scope metrics from cilium. It is prepended to metric // names and separated with a '_' Namespace = CiliumAgentNamespace @@ -330,11 +355,9 @@ var ( // Events - // EventTS*is the time in seconds since epoch that we last received an - // event that we will handle - // source is one of k8s, docker or apia - - // EventTS is the timestamp of k8s resource events. + // EventTS is the time in seconds since epoch that we last received an + // event that was handled by Cilium. This metric tracks the source of the + // event which can be one of K8s or Cilium's API. EventTS = NoOpGaugeVec // EventLagK8s is the lag calculation for k8s Pod events. 
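// The metric.Labels / *VecWithLabels constructors added in this vendored update let callers
// declare the full value range of every label: each combination is registered at zero when
// the vector is created (see initLabels above), and undeclared values are reported at
// runtime via checkLabelValues. A minimal usage sketch follows; it is illustrative only and
// not part of the patch, and the "demo_events_total" metric name and "outcome" values are
// hypothetical.
package main

import "github.com/cilium/cilium/pkg/metrics/metric"

func main() {
	// Counter vector whose "outcome" label may only take the two declared values;
	// both series are pre-initialized to zero by NewCounterVecWithLabels.
	demoEvents := metric.NewCounterVecWithLabels(metric.CounterOpts{
		Namespace: "cilium",
		Name:      "demo_events_total",
		Help:      "Demo counter partitioned by outcome",
	}, metric.Labels{
		{Name: "outcome", Values: metric.NewValues("success", "failure")},
	})

	// Increment one of the pre-registered series.
	demoEvents.WithLabelValues("success").Add(1)

	// An undeclared value is still collected, but the metric package logs a
	// "metric label constraints violated" error for it.
	demoEvents.WithLabelValues("unexpected").Add(1)
}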
@@ -358,22 +381,6 @@ var ( // L3-L4 statistics - // DropCount is the total drop requests, - // tagged by drop reason and direction(ingress/egress) - DropCount = NoOpCounterVec - - // DropBytes is the total dropped bytes, - // tagged by drop reason and direction(ingress/egress) - DropBytes = NoOpCounterVec - - // ForwardCount is the total forwarded packets, - // tagged by ingress/egress direction - ForwardCount = NoOpCounterVec - - // ForwardBytes is the total forwarded bytes, - // tagged by ingress/egress direction - ForwardBytes = NoOpCounterVec - // Datapath statistics // ConntrackGCRuns is the number of times that the conntrack GC @@ -658,10 +665,6 @@ type LegacyMetrics struct { ProxyPolicyL7Total metric.Vec[metric.Counter] ProxyUpstreamTime metric.Vec[metric.Observer] ProxyDatapathUpdateTimeout metric.Counter - DropCount metric.Vec[metric.Counter] - DropBytes metric.Vec[metric.Counter] - ForwardCount metric.Vec[metric.Counter] - ForwardBytes metric.Vec[metric.Counter] ConntrackGCRuns metric.Vec[metric.Counter] ConntrackGCKeyFallbacks metric.Vec[metric.Counter] ConntrackGCSize metric.Vec[metric.Gauge] @@ -735,13 +738,18 @@ func NewLegacyMetrics() *LegacyMetrics { Help: "Duration of processed API calls labeled by path, method and return code.", }, []string{LabelPath, LabelMethod, LabelAPIReturnCode}), - EndpointRegenerationTotal: metric.NewCounterVec(metric.CounterOpts{ + EndpointRegenerationTotal: metric.NewCounterVecWithLabels(metric.CounterOpts{ ConfigName: Namespace + "_endpoint_regenerations_total", Namespace: Namespace, Name: "endpoint_regenerations_total", Help: "Count of all endpoint regenerations that have completed, tagged by outcome", - }, []string{"outcome"}), + }, metric.Labels{ + { + Name: LabelOutcome, + Values: metric.NewValues(LabelValueOutcomeSuccess, LabelValueOutcomeFailure), + }, + }), EndpointStateCount: metric.NewGaugeVec(metric.GaugeOpts{ ConfigName: Namespace + "_endpoint_state", @@ -788,13 +796,18 @@ func NewLegacyMetrics() *LegacyMetrics { Help: "Highest policy revision number in the agent", }), - PolicyChangeTotal: metric.NewCounterVec(metric.CounterOpts{ + PolicyChangeTotal: metric.NewCounterVecWithLabels(metric.CounterOpts{ ConfigName: Namespace + "_policy_change_total", Namespace: Namespace, Name: "policy_change_total", Help: "Number of policy changes by outcome", - }, []string{"outcome"}), + }, metric.Labels{ + { + Name: LabelOutcome, + Values: metric.NewValues(LabelValueOutcomeSuccess, LabelValueOutcomeFailure), + }, + }), PolicyEndpointStatus: metric.NewGaugeVec(metric.GaugeOpts{ ConfigName: Namespace + "_policy_endpoint_enforcement_status", @@ -804,13 +817,18 @@ func NewLegacyMetrics() *LegacyMetrics { Help: "Number of endpoints labeled by policy enforcement status", }, []string{LabelPolicyEnforcement}), - PolicyImplementationDelay: metric.NewHistogramVec(metric.HistogramOpts{ + PolicyImplementationDelay: metric.NewHistogramVecWithLabels(metric.HistogramOpts{ ConfigName: Namespace + "_policy_implementation_delay", Namespace: Namespace, Name: "policy_implementation_delay", Help: "Time between a policy change and it being fully deployed into the datapath", - }, []string{LabelPolicySource}), + }, metric.Labels{ + { + Name: LabelPolicySource, + Values: metric.NewValues(string(source.Kubernetes), string(source.CustomResource), string(source.LocalAPI)), + }, + }), CIDRGroupsReferenced: metric.NewGauge(metric.GaugeOpts{ ConfigName: Namespace + "cidrgroups_referenced", @@ -841,7 +859,7 @@ func NewLegacyMetrics() *LegacyMetrics { ConfigName: Namespace + 
"_event_ts", Namespace: Namespace, Name: "event_ts", - Help: "Last timestamp when we received an event", + Help: "Last timestamp when Cilium received an event from a control plane source, per resource and per action", }, []string{LabelEventSource, LabelScope, LabelAction}), EventLagK8s: metric.NewGauge(metric.GaugeOpts{ @@ -861,12 +879,21 @@ func NewLegacyMetrics() *LegacyMetrics { Help: "Number of redirects installed for endpoints, labeled by protocol", }, []string{LabelProtocolL7}), - ProxyPolicyL7Total: metric.NewCounterVec(metric.CounterOpts{ + ProxyPolicyL7Total: metric.NewCounterVecWithLabels(metric.CounterOpts{ ConfigName: Namespace + "_policy_l7_total", Namespace: Namespace, Name: "policy_l7_total", Help: "Number of total proxy requests handled", - }, []string{"rule", "proxy_type"}), + }, metric.Labels{ + { + Name: LabelL7Rule, + Values: metric.NewValues("received", "forwarded", "denied", "parse_errors"), + }, + { + Name: LabelL7ProxyType, + Values: metric.NewValues("fqdn", "envoy"), + }, + }), ProxyUpstreamTime: metric.NewHistogramVec(metric.HistogramOpts{ ConfigName: Namespace + "_proxy_upstream_reply_seconds", @@ -884,38 +911,6 @@ func NewLegacyMetrics() *LegacyMetrics { Help: "Number of total datapath update timeouts due to FQDN IP updates", }), - DropCount: metric.NewCounterVec(metric.CounterOpts{ - ConfigName: Namespace + "_drop_count_total", - Namespace: Namespace, - Name: "drop_count_total", - Help: "Total dropped packets, tagged by drop reason and ingress/egress direction", - }, - []string{"reason", LabelDirection}), - - DropBytes: metric.NewCounterVec(metric.CounterOpts{ - ConfigName: Namespace + "_drop_bytes_total", - Namespace: Namespace, - Name: "drop_bytes_total", - Help: "Total dropped bytes, tagged by drop reason and ingress/egress direction", - }, - []string{"reason", LabelDirection}), - - ForwardCount: metric.NewCounterVec(metric.CounterOpts{ - ConfigName: Namespace + "_forward_count_total", - Namespace: Namespace, - Name: "forward_count_total", - Help: "Total forwarded packets, tagged by ingress/egress direction", - }, - []string{LabelDirection}), - - ForwardBytes: metric.NewCounterVec(metric.CounterOpts{ - ConfigName: Namespace + "_forward_bytes_total", - Namespace: Namespace, - Name: "forward_bytes_total", - Help: "Total forwarded bytes, tagged by ingress/egress direction", - }, - []string{LabelDirection}), - ConntrackGCRuns: metric.NewCounterVec(metric.CounterOpts{ ConfigName: Namespace + "_" + SubsystemDatapath + "_conntrack_gc_runs_total", Namespace: Namespace, @@ -1014,19 +1009,51 @@ func NewLegacyMetrics() *LegacyMetrics { Help: "Number of times that Cilium has started a subprocess, labeled by subsystem", }, []string{LabelSubsystem}), - KubernetesEventProcessed: metric.NewCounterVec(metric.CounterOpts{ + KubernetesEventProcessed: metric.NewCounterVecWithLabels(metric.CounterOpts{ ConfigName: Namespace + "_kubernetes_events_total", Namespace: Namespace, Name: "kubernetes_events_total", Help: "Number of Kubernetes events processed labeled by scope, action and execution result", - }, []string{LabelScope, LabelAction, LabelStatus}), + }, + metric.Labels{ + { + Name: LabelScope, + Values: metric.NewValues("CiliumNetworkPolicy", "CiliumClusterwideNetworkPolicy", "NetworkPolicy"), + }, + { + Name: LabelAction, + Values: metric.NewValues("update", "delete"), + }, + { + Name: LabelStatus, + Values: metric.NewValues("success", "failed"), + }, + }, + ), - KubernetesEventReceived: metric.NewCounterVec(metric.CounterOpts{ + KubernetesEventReceived: 
metric.NewCounterVecWithLabels(metric.CounterOpts{ ConfigName: Namespace + "_kubernetes_events_received_total", Namespace: Namespace, Name: "kubernetes_events_received_total", Help: "Number of Kubernetes events received labeled by scope, action, valid data and equalness", - }, []string{LabelScope, LabelAction, "valid", "equal"}), + }, metric.Labels{ + { + Name: LabelScope, + Values: metric.NewValues("CiliumNetworkPolicy", "CiliumClusterwideNetworkPolicy", "NetworkPolicy"), + }, + { + Name: LabelAction, + Values: metric.NewValues("update", "delete"), + }, + { + Name: "valid", + Values: LabelValuesBool, + }, + { + Name: "equal", + Values: LabelValuesBool, + }, + }), KubernetesAPIInteractions: metric.NewHistogramVec(metric.HistogramOpts{ ConfigName: Namespace + "_" + SubsystemK8sClient + "_api_latency_time_seconds", @@ -1369,10 +1396,6 @@ func NewLegacyMetrics() *LegacyMetrics { ProxyPolicyL7Total = lm.ProxyPolicyL7Total ProxyUpstreamTime = lm.ProxyUpstreamTime ProxyDatapathUpdateTimeout = lm.ProxyDatapathUpdateTimeout - DropCount = lm.DropCount - DropBytes = lm.DropBytes - ForwardCount = lm.ForwardCount - ForwardBytes = lm.ForwardBytes ConntrackGCRuns = lm.ConntrackGCRuns ConntrackGCKeyFallbacks = lm.ConntrackGCKeyFallbacks ConntrackGCSize = lm.ConntrackGCSize @@ -1487,11 +1510,13 @@ func Reinitialize() { // Register registers a collector func Register(c prometheus.Collector) error { + var err error + withRegistry(func(reg *Registry) { - reg.Register(c) + err = reg.Register(c) }) - return nil + return err } // RegisterList registers a list of collectors. If registration of one diff --git a/vendor/github.com/cilium/cilium/pkg/monitor/api/drop.go b/vendor/github.com/cilium/cilium/pkg/monitor/api/drop.go index 8c2371eb99..c1aaa1a790 100644 --- a/vendor/github.com/cilium/cilium/pkg/monitor/api/drop.go +++ b/vendor/github.com/cilium/cilium/pkg/monitor/api/drop.go @@ -93,6 +93,7 @@ var errors = map[uint8]string{ 195: "Traffic is unencrypted", 196: "TTL exceeded", 197: "No node ID found", + 198: "Rate limited", } func extendedReason(reason uint8, extError int8) string { diff --git a/vendor/github.com/cilium/cilium/pkg/node/addressing/addresstype.go b/vendor/github.com/cilium/cilium/pkg/node/addressing/addresstype.go index f75d584727..64a4b94c19 100644 --- a/vendor/github.com/cilium/cilium/pkg/node/addressing/addresstype.go +++ b/vendor/github.com/cilium/cilium/pkg/node/addressing/addresstype.go @@ -3,6 +3,10 @@ package addressing +import ( + "net" +) + // AddressType represents a type of IP address for a node. They are copied // from k8s.io/api/core/v1/types.go to avoid pulling in a lot of Kubernetes // imports into this package.s @@ -16,3 +20,46 @@ const ( NodeInternalDNS AddressType = "InternalDNS" NodeCiliumInternalIP AddressType = "CiliumInternalIP" ) + +type Address interface { + AddrType() AddressType + ToString() string +} + +// ExtractNodeIP returns one of the provided IP addresses available with the following priority: +// - NodeInternalIP +// - NodeExternalIP +// - other IP address type +// An error is returned if ExtractNodeIP fails to get an IP based on the provided address family. 
+func ExtractNodeIP[T Address](addrs []T, ipv6 bool) net.IP { + var backupIP net.IP + for _, addr := range addrs { + parsed := net.ParseIP(addr.ToString()) + if parsed == nil { + continue + } + if (ipv6 && parsed.To4() != nil) || + (!ipv6 && parsed.To4() == nil) { + continue + } + switch addr.AddrType() { + // Ignore CiliumInternalIPs + case NodeCiliumInternalIP: + continue + // Always prefer a cluster internal IP + case NodeInternalIP: + return parsed + case NodeExternalIP: + // Fall back to external Node IP + // if no internal IP could be found + backupIP = parsed + default: + // As a last resort, if no internal or external + // IP was found, use any node address available + if backupIP == nil { + backupIP = parsed + } + } + } + return backupIP +} diff --git a/vendor/github.com/cilium/cilium/pkg/option/config.go b/vendor/github.com/cilium/cilium/pkg/option/config.go index f642f1c1e1..63375f038d 100644 --- a/vendor/github.com/cilium/cilium/pkg/option/config.go +++ b/vendor/github.com/cilium/cilium/pkg/option/config.go @@ -260,7 +260,7 @@ const ( NodePortAlg = "node-port-algorithm" // NodePortAcceleration indicates whether NodePort should be accelerated - // via XDP ("none", "generic" or "native") + // via XDP ("none", "generic", "native", or "best-effort") NodePortAcceleration = "node-port-acceleration" // Alias to NodePortMode @@ -308,8 +308,6 @@ const ( // EnableSessionAffinity enables a support for service sessionAffinity EnableSessionAffinity = "enable-session-affinity" - EnableServiceTopology = "enable-service-topology" - // EnableIdentityMark enables setting the mark field with the identity for // local traffic. This may be disabled if chaining modes and Cilium use // conflicting marks. @@ -324,12 +322,6 @@ const ( // considered local ones with HOST_ID in the ipcache AddressScopeMax = "local-max-addr-scope" - // EnableBandwidthManager enables EDT-based pacing - EnableBandwidthManager = "enable-bandwidth-manager" - - // EnableBBR enables BBR TCP congestion control for the node including Pods - EnableBBR = "enable-bbr" - // EnableRecorder enables the datapath pcap recorder EnableRecorder = "enable-recorder" @@ -400,11 +392,6 @@ const ( // to skip netfilter connection tracking on all pod traffic. 
InstallNoConntrackIptRules = "install-no-conntrack-iptables-rules" - IPTablesLockTimeout = "iptables-lock-timeout" - - // IPTablesRandomFully sets iptables flag random-fully on masquerading rules - IPTablesRandomFully = "iptables-random-fully" - // IPv6NodeAddr is the IPv6 address of node IPv6NodeAddr = "ipv6-node" @@ -512,25 +499,20 @@ const ( // BPFSocketLBHostnsOnly is the name of the BPFSocketLBHostnsOnly option BPFSocketLBHostnsOnly = "bpf-lb-sock-hostns-only" - // TunnelName is the name of the Tunnel option - TunnelName = "tunnel" - // RoutingMode is the name of the option to choose between native routing and tunneling mode RoutingMode = "routing-mode" - // TunnelProtocol is the name of the option to select the tunneling protocol - TunnelProtocol = "tunnel-protocol" + // ServiceNoBackendResponse is the name of the option to pick how to handle traffic for services + // without any backends + ServiceNoBackendResponse = "service-no-backend-response" - // TunnelPortName is the name of the TunnelPort option - TunnelPortName = "tunnel-port" + // ServiceNoBackendResponseReject is the name of the option to reject traffic for services + // without any backends + ServiceNoBackendResponseReject = "reject" - // SingleClusterRouteName is the name of the SingleClusterRoute option - // - // SingleClusterRoute enables use of a single route covering the entire - // cluster CIDR to point to the cilium_host interface instead of using - // a separate route for each cluster node CIDR. This option is not - // compatible with Tunnel=TunnelDisabled - SingleClusterRouteName = "single-cluster-route" + // ServiceNoBackendResponseDrop is the name of the option to drop traffic for services + // without any backends + ServiceNoBackendResponseDrop = "drop" // MaxInternalTimerDelay sets a maximum on all periodic timers in // the agent in order to flush out timer-related bugs in the agent. @@ -666,10 +648,6 @@ const ( // load loggging LogSystemLoadConfigName = "log-system-load" - // PrependIptablesChainsName is the name of the option to enable - // prepending iptables chains instead of appending - PrependIptablesChainsName = "prepend-iptables-chains" - // DisableCiliumEndpointCRDName is the name of the option to disable // use of the CEP CRD DisableCiliumEndpointCRDName = "disable-endpoint-crd" @@ -841,8 +819,13 @@ const ( // endpoints that are no longer alive and healthy. EndpointGCInterval = "endpoint-gc-interval" - // K8sEventHandover is the name of the K8sEventHandover option - K8sEventHandover = "enable-k8s-event-handover" + // This option turns off switching from full pods informer to node's local pods informer + // when CEP CRD is disabled and kvstore is used. + // Switching from full pods informer to node's local pods informer is considered default behaviour + // and this option allows us to change it back to having full pods informer all the time. + // It's meant to be mitigation only in case if endpoint synchronization from kvstore has some bugs + // and we actually need to watch all pods all the time. 
+ LegacyTurnOffK8sEventHandover = "legacy-turn-off-k8s-event-handover" // LoopbackIPv4 is the address to use for service loopback SNAT LoopbackIPv4 = "ipv4-service-loopback-address" @@ -876,9 +859,15 @@ const ( // IPAMMultiPoolPreAllocation defines the pre-allocation value for each IPAM pool IPAMMultiPoolPreAllocation = "ipam-multi-pool-pre-allocation" + // IPAMDefaultIPPool defines the default IP Pool when using multi-pool + IPAMDefaultIPPool = "ipam-default-ip-pool" + // XDPModeNative for loading progs with XDPModeLinkDriver XDPModeNative = "native" + // XDPModeBestEffort for loading progs with XDPModeLinkDriver + XDPModeBestEffort = "best-effort" + // XDPModeGeneric for loading progs with XDPModeLinkGeneric XDPModeGeneric = "testing-only" @@ -930,10 +919,6 @@ const ( // identity allocation IdentityAllocationModeCRD = "crd" - // DisableCNPStatusUpdates disables updating of CNP NodeStatus in the CNP - // CRD. - DisableCNPStatusUpdates = "disable-cnp-status-updates" - // EnableLocalNodeRoute controls installation of the route which points // the allocation prefix of the local node. EnableLocalNodeRoute = "enable-local-node-route" @@ -991,6 +976,10 @@ const ( // HubbleMetrics specifies enabled metrics and their configuration options. HubbleMetrics = "hubble-metrics" + // HubbleFlowlogsConfigFilePath specifies the filepath with configuration of hubble flowlogs. + // e.g. "/etc/cilium/flowlog.yaml" + HubbleFlowlogsConfigFilePath = "hubble-flowlogs-config-path" + // HubbleExportFilePath specifies the filepath to write Hubble events to. // e.g. "/var/run/cilium/hubble/events.log" HubbleExportFilePath = "hubble-export-file-path" @@ -1040,6 +1029,9 @@ const ( // HubbleRedactHttpURLQuery controls if the URL query will be redacted from flows HubbleRedactHttpURLQuery = "hubble-redact-http-urlquery" + // HubbleRedactHttpUserInfo controls if the user info will be redacted from flows + HubbleRedactHttpUserInfo = "hubble-redact-http-userinfo" + // HubbleRedactKafkaApiKey controls if the Kafka API key will be redacted from flows HubbleRedactKafkaApiKey = "hubble-redact-kafka-apikey" @@ -1049,10 +1041,6 @@ const ( // HubbleRedactHttpHeadersDeny controls which http headers will be redacted from flows HubbleRedactHttpHeadersDeny = "hubble-redact-http-headers-deny" - // DisableIptablesFeederRules specifies which chains will be excluded - // when installing the feeder rules - DisableIptablesFeederRules = "disable-iptables-feeder-rules" - // K8sHeartbeatTimeout configures the timeout for apiserver heartbeat K8sHeartbeatTimeout = "k8s-heartbeat-timeout" @@ -1222,18 +1210,6 @@ var ( MonitorAggregationFlagsDefault = []string{"syn", "fin", "rst"} ) -// Available option for DaemonConfig.Tunnel -const ( - // TunnelVXLAN specifies VXLAN encapsulation - TunnelVXLAN = "vxlan" - - // TunnelGeneve specifies Geneve encapsulation - TunnelGeneve = "geneve" - - // TunnelDisabled specifies to disable encapsulation - TunnelDisabled = "disabled" -) - // Available options for DaemonConfig.RoutingMode const ( // RoutingModeNative specifies native routing mode @@ -1294,6 +1270,9 @@ const ( // CNIExclusive tells the agent to remove other CNI configuration files CNIExclusive = "cni-exclusive" + // CNIExternalRouting delegates endpoint routing to the chained CNI plugin. + CNIExternalRouting = "cni-external-routing" + // CNILogFile is the path to a log file (on the host) for the CNI plugin // binary to use for logging. 
CNILogFile = "cni-log-file" @@ -1351,6 +1330,9 @@ const ( // NodePortAccelerationNative means we accelerate NodePort via native XDP in the driver (preferred) NodePortAccelerationNative = XDPModeNative + // NodePortAccelerationBestEffort means we accelerate NodePort via native XDP in the driver (preferred), but will skip devices without driver support + NodePortAccelerationBestEffort = XDPModeBestEffort + // KubeProxyReplacementPartial specifies to enable only selected kube-proxy // replacement features (might panic) KubeProxyReplacementPartial = "partial" @@ -1379,11 +1361,6 @@ const ( PprofPortAgent = 6060 ) -// GetTunnelModes returns the list of all tunnel modes -func GetTunnelModes() string { - return fmt.Sprintf("%s, %s, %s", TunnelVXLAN, TunnelGeneve, TunnelDisabled) -} - // getEnvName returns the environment variable to be used for the given option name. func getEnvName(option string) string { under := strings.Replace(option, "-", "_", -1) @@ -1456,11 +1433,8 @@ type DaemonConfig struct { // devices. EnableRuntimeDeviceDetection bool - DatapathMode string // Datapath mode - Tunnel string // Tunnel mode - RoutingMode string // Routing mode - TunnelProtocol string // Tunneling protocol - TunnelPort int // Tunnel port + DatapathMode string // Datapath mode + RoutingMode string // Routing mode DryMode bool // Do not create BPF maps, devices, .. @@ -1608,10 +1582,6 @@ type DaemonConfig struct { // RunInterval. Zero means unlimited. MaxControllerInterval int - // UseSingleClusterRoute specifies whether to use a single cluster route - // instead of per-node routes. - UseSingleClusterRoute bool - // HTTPNormalizePath switches on Envoy HTTP path normalization options, which currently // includes RFC 3986 path normalization, Envoy merge slashes option, and unescaping and // redirecting for paths that contain escaped slashes. These are necessary to keep path based @@ -1671,18 +1641,6 @@ type DaemonConfig struct { ProcFs string - // PrependIptablesChains is the name of the option to enable prepending - // iptables chains instead of appending - PrependIptablesChains bool - - // IPTablesLockTimeout defines the "-w" iptables option when the - // iptables CLI is directly invoked from the Cilium agent. - IPTablesLockTimeout time.Duration - - // IPTablesRandomFully defines the "--random-fully" iptables option when the - // iptables CLI is directly invoked from the Cilium agent. - IPTablesRandomFully bool - // K8sNamespace is the name of the namespace in which Cilium is // deployed in when running in Kubernetes mode K8sNamespace string @@ -1810,6 +1768,7 @@ type DaemonConfig struct { KVStoreOpt map[string]string LabelPrefixFile string Labels []string + LegacyTurnOffK8sEventHandover bool LogDriver []string LogOpt map[string]string Logstash bool @@ -1898,14 +1857,6 @@ type DaemonConfig struct { // been reached. DNSProxyConcurrencyProcessingGracePeriod time.Duration - // DNSProxyLockCount is the array size containing mutexes which protect - // against parallel handling of DNS response IPs. - DNSProxyLockCount int - - // DNSProxyLockTimeout is timeout when acquiring the locks controlled by - // DNSProxyLockCount. - DNSProxyLockTimeout time.Duration - // EnableXTSocketFallback allows disabling of kernel's ip_early_demux // sysctl option if `xt_socket` kernel module is not available. EnableXTSocketFallback bool @@ -1992,12 +1943,6 @@ type DaemonConfig struct { // the specified maximum value. 
ConntrackGCMaxInterval time.Duration - // K8sEventHandover enables use of the kvstore to optimize Kubernetes - // event handling by listening for k8s events in the operator and - // mirroring it into the kvstore for reduced overhead in large - // clusters. - K8sEventHandover bool - // LoopbackIPv4 is the address to use for service loopback SNAT LoopbackIPv4 string @@ -2067,7 +2012,7 @@ type DaemonConfig struct { MaglevHashSeed string // NodePortAcceleration indicates whether NodePort should be accelerated - // via XDP ("none", "generic" or "native") + // via XDP ("none", "generic", "native", or "best-effort") NodePortAcceleration string // NodePortBindProtection rejects bind requests to NodePort service ports @@ -2086,15 +2031,6 @@ type DaemonConfig struct { // considered local ones with HOST_ID in the ipcache AddressScopeMax int - // EnableBandwidthManager enables EDT-based pacing - EnableBandwidthManager bool - - // EnableBBR enables BBR TCP congestion control for the node including Pods - EnableBBR bool - - // ResetQueueMapping resets the Pod's skb queue mapping - ResetQueueMapping bool - // EnableRecorder enables the datapath pcap recorder EnableRecorder bool @@ -2125,8 +2061,6 @@ type DaemonConfig struct { // EnableSessionAffinity enables a support for service sessionAffinity EnableSessionAffinity bool - EnableServiceTopology bool - // Selection of BPF main clock source (ktime vs jiffies) ClockSource BPFClockSource @@ -2158,7 +2092,8 @@ type DaemonConfig struct { // IPAMMultiPoolPreAllocation defines the pre-allocation value for each IPAM pool IPAMMultiPoolPreAllocation map[string]string - + // IPAMDefaultIPPool the default IP Pool when using multi-pool + IPAMDefaultIPPool string // AutoCreateCiliumNodeResource enables automatic creation of a // CiliumNode resource for the local node AutoCreateCiliumNodeResource bool @@ -2184,10 +2119,6 @@ type DaemonConfig struct { // allocation IdentityAllocationMode string - // DisableCNPStatusUpdates disables updating of CNP NodeStatus in the CNP - // CRD. - DisableCNPStatusUpdates bool - // AllowICMPFragNeeded allows ICMP Fragmentation Needed type packets in // the network policy for cilium-agent. AllowICMPFragNeeded bool @@ -2249,6 +2180,10 @@ type DaemonConfig struct { // HubbleMetrics specifies enabled metrics and their configuration options. HubbleMetrics []string + // HubbleFlowlogsConfigFilePath specifies the filepath with configuration of hubble flowlogs. + // e.g. "/etc/cilium/flowlog.yaml" + HubbleFlowlogsConfigFilePath string + // HubbleExportFilePath specifies the filepath to write Hubble events to. // e.g. "/var/run/cilium/hubble/events.log" HubbleExportFilePath string @@ -2298,6 +2233,9 @@ type DaemonConfig struct { // HubbleRedactURLQuery controls if the URL query will be redacted from flows HubbleRedactHttpURLQuery bool + // HubbleRedactUserInfo controls if the user info will be redacted from flows + HubbleRedactHttpUserInfo bool + // HubbleRedactKafkaApiKey controls if Kafka API key will be redacted from flows HubbleRedactKafkaApiKey bool @@ -2311,10 +2249,6 @@ type DaemonConfig struct { // CiliumEndpoint.Status resource EndpointStatus map[string]struct{} - // DisableIptablesFeederRules specifies which chains will be excluded - // when installing the feeder rules - DisableIptablesFeederRules []string - // EnableIPv4FragmentsTracking enables IPv4 fragments tracking for // L4-based lookups. Needs LRU map support. 
EnableIPv4FragmentsTracking bool @@ -2478,6 +2412,19 @@ type DaemonConfig struct { // - world // - world, remote-node PolicyCIDRMatchMode []string + + // MaxConnectedClusters sets the maximum number of clusters that can be + // connected in a clustermesh. + // The value is used to determine the bit allocation for cluster ID and + // identity in a numeric identity. Values > 255 will decrease the number of + // allocatable identities. + MaxConnectedClusters uint32 + + // ForceDeviceRequired enforces the attachment of BPF programs on native device. + ForceDeviceRequired bool + + // ServiceNoBackendResponse determines how we handle traffic to a service with no backends. + ServiceNoBackendResponse string } var ( @@ -2488,6 +2435,7 @@ var ( Monitor: &models.MonitorStatus{Cpus: int64(runtime.NumCPU()), Npages: 64, Pagesize: int64(os.Getpagesize()), Lost: 0, Unknown: 0}, IPv6ClusterAllocCIDR: defaults.IPv6ClusterAllocCIDR, IPv6ClusterAllocCIDRBase: defaults.IPv6ClusterAllocCIDRBase, + IPAMDefaultIPPool: defaults.IPAMDefaultIPPool, EnableHostIPRestore: defaults.EnableHostIPRestore, EnableHealthChecking: defaults.EnableHealthChecking, EnableEndpointHealthChecking: defaults.EnableEndpointHealthChecking, @@ -2528,6 +2476,7 @@ var ( EnableBGPControlPlane: defaults.EnableBGPControlPlane, EnableK8sNetworkPolicy: defaults.EnableK8sNetworkPolicy, PolicyCIDRMatchMode: defaults.PolicyCIDRMatchMode, + MaxConnectedClusters: defaults.MaxConnectedClusters, } ) @@ -2632,31 +2581,11 @@ func (c *DaemonConfig) TunnelingEnabled() bool { return c.RoutingMode != RoutingModeNative } -// TunnelDevice returns cilium_{vxlan,geneve} depending on the config or "" if disabled. -func (c *DaemonConfig) TunnelDevice() string { - if c.TunnelingEnabled() { - return fmt.Sprintf("cilium_%s", c.TunnelProtocol) - } else { - return "" - } -} - -// TunnelExists returns true if some traffic may go through a tunnel, including -// if the primary mode is native routing. For example, in the egress gateway, -// we may send such traffic to a gateway node via a tunnel. -// In conjunction with the DSR Geneve and the direct routing, traffic from -// intermediate nodes to backend pods go through a tunnel, but the datapath logic -// takes care of the MTU overhead. So no need to take it into account here. -// See encap_geneve_dsr_opt[4,6] in nodeport.h -func (c *DaemonConfig) TunnelExists() bool { - return c.TunnelingEnabled() || c.EgressGatewayCommonEnabled() || c.EnableHighScaleIPcache -} - // AreDevicesRequired returns true if the agent needs to attach to the native // devices to implement some features. func (c *DaemonConfig) AreDevicesRequired() bool { - return c.EnableNodePort || c.EnableHostFirewall || c.EnableBandwidthManager || - c.EnableWireguard || c.EnableHighScaleIPcache || c.EnableL2Announcements + return c.EnableNodePort || c.EnableHostFirewall || c.EnableWireguard || + c.EnableHighScaleIPcache || c.EnableL2Announcements || c.ForceDeviceRequired } // MasqueradingEnabled returns true if either IPv4 or IPv6 masquerading is enabled. @@ -2783,12 +2712,6 @@ func (c *DaemonConfig) K8sGatewayAPIEnabled() bool { return c.EnableGatewayAPI } -// EgressGatewayCommonEnabled returns true if at least one egress gateway implementation -// is enabled. 
-func (c *DaemonConfig) EgressGatewayCommonEnabled() bool { - return c.EnableIPv4EgressGateway -} - func (c *DaemonConfig) PolicyCIDRMatchesNodes() bool { for _, mode := range c.PolicyCIDRMatchMode { if mode == "nodes" { @@ -2826,6 +2749,11 @@ func (c *DaemonConfig) DirectRoutingDeviceRequired() bool { return c.EnableNodePort || BPFHostRoutingEnabled || Config.EnableWireguard } +func (c *DaemonConfig) LoadBalancerUsesDSR() bool { + return c.NodePortMode == NodePortModeDSR || + c.NodePortMode == NodePortModeHybrid +} + func (c *DaemonConfig) validateIPv6ClusterAllocCIDR() error { ip, cidr, err := net.ParseCIDR(c.IPv6ClusterAllocCIDR) if err != nil { @@ -2905,20 +2833,13 @@ func (c *DaemonConfig) Validate(vp *viper.Viper) error { c.RoutingMode, RoutingModeTunnel, RoutingModeNative) } - switch c.TunnelProtocol { - case TunnelVXLAN, TunnelGeneve: - default: - return fmt.Errorf("invalid tunnel protocol %q", c.TunnelProtocol) - } - - if c.RoutingMode == RoutingModeNative && c.UseSingleClusterRoute { - return fmt.Errorf("option --%s cannot be used in combination with --%s=%s", - SingleClusterRouteName, RoutingMode, RoutingModeNative) - } - cinfo := clustermeshTypes.ClusterInfo{ - ID: c.ClusterID, - Name: c.ClusterName, + ID: c.ClusterID, + Name: c.ClusterName, + MaxConnectedClusters: c.MaxConnectedClusters, + } + if err := cinfo.InitClusterIDMax(); err != nil { + return err } if err := cinfo.Validate(); err != nil { return err @@ -3074,6 +2995,7 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.CGroupRoot = vp.GetString(CGroupRoot) c.ClusterID = vp.GetUint32(clustermeshTypes.OptClusterID) c.ClusterName = vp.GetString(clustermeshTypes.OptClusterName) + c.MaxConnectedClusters = vp.GetUint32(clustermeshTypes.OptMaxConnectedClusters) c.DatapathMode = vp.GetString(DatapathMode) c.Debug = vp.GetBool(DebugArg) c.DebugVerbose = vp.GetStringSlice(DebugVerbose) @@ -3127,9 +3049,6 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.EnableAutoProtectNodePortRange = vp.GetBool(EnableAutoProtectNodePortRange) c.KubeProxyReplacement = vp.GetString(KubeProxyReplacement) c.EnableSessionAffinity = vp.GetBool(EnableSessionAffinity) - c.EnableServiceTopology = vp.GetBool(EnableServiceTopology) - c.EnableBandwidthManager = vp.GetBool(EnableBandwidthManager) - c.EnableBBR = vp.GetBool(EnableBBR) c.EnableRecorder = vp.GetBool(EnableRecorder) c.EnableMKE = vp.GetBool(EnableMKE) c.CgroupPathMKE = vp.GetString(CgroupPathMKE) @@ -3147,6 +3066,7 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.IdentityChangeGracePeriod = vp.GetDuration(IdentityChangeGracePeriod) c.IdentityRestoreGracePeriod = vp.GetDuration(IdentityRestoreGracePeriod) c.IPAM = vp.GetString(IPAM) + c.IPAMDefaultIPPool = vp.GetString(IPAMDefaultIPPool) c.IPv4Range = vp.GetString(IPv4Range) c.IPv4NodeAddr = vp.GetString(IPv4NodeAddr) c.IPv4ServiceRange = vp.GetString(IPv4ServiceRange) @@ -3158,7 +3078,7 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.K8sRequireIPv4PodCIDR = vp.GetBool(K8sRequireIPv4PodCIDRName) c.K8sRequireIPv6PodCIDR = vp.GetBool(K8sRequireIPv6PodCIDRName) c.K8sServiceCacheSize = uint(vp.GetInt(K8sServiceCacheSize)) - c.K8sEventHandover = vp.GetBool(K8sEventHandover) + c.LegacyTurnOffK8sEventHandover = vp.GetBool(LegacyTurnOffK8sEventHandover) c.K8sSyncTimeout = vp.GetDuration(K8sSyncTimeoutName) c.AllocatorListTimeout = vp.GetDuration(AllocatorListTimeoutName) c.K8sWatcherEndpointSelector = vp.GetString(K8sWatcherEndpointSelector) @@ -3188,8 +3108,6 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { 
c.EnvoyConfigTimeout = vp.GetDuration(EnvoyConfigTimeout) c.IPMasqAgentConfigPath = vp.GetString(IPMasqAgentConfigPath) c.InstallIptRules = vp.GetBool(InstallIptRules) - c.IPTablesLockTimeout = vp.GetDuration(IPTablesLockTimeout) - c.IPTablesRandomFully = vp.GetBool(IPTablesRandomFully) c.IPSecKeyFile = vp.GetString(IPSecKeyFileName) c.IPsecKeyRotationDuration = vp.GetDuration(IPsecKeyRotationDuration) c.EnableIPsecKeyWatcher = vp.GetBool(EnableIPsecKeyWatcher) @@ -3197,7 +3115,6 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.MonitorAggregationInterval = vp.GetDuration(MonitorAggregationInterval) c.MTU = vp.GetInt(MTUName) c.PreAllocateMaps = vp.GetBool(PreAllocateMapsName) - c.PrependIptablesChains = vp.GetBool(PrependIptablesChainsName) c.ProcFs = vp.GetString(ProcFs) c.ProxyConnectTimeout = vp.GetInt(ProxyConnectTimeout) c.ProxyGID = vp.GetInt(ProxyGID) @@ -3210,7 +3127,6 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.RunDir = vp.GetString(StateDir) c.ExternalEnvoyProxy = vp.GetBool(ExternalEnvoyProxy) c.SidecarIstioProxyImage = vp.GetString(SidecarIstioProxyImage) - c.UseSingleClusterRoute = vp.GetBool(SingleClusterRouteName) c.SocketPath = vp.GetString(SocketPath) c.TracePayloadlen = vp.GetInt(TracePayloadlen) c.Version = vp.GetString(Version) @@ -3248,6 +3164,15 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.IPv6NAT46x64CIDR = defaults.IPv6NAT46x64CIDR c.IPAMCiliumNodeUpdateRate = vp.GetDuration(IPAMCiliumNodeUpdateRate) + c.ServiceNoBackendResponse = vp.GetString(ServiceNoBackendResponse) + switch c.ServiceNoBackendResponse { + case ServiceNoBackendResponseReject, ServiceNoBackendResponseDrop: + case "": + c.ServiceNoBackendResponse = defaults.ServiceNoBackendResponse + default: + log.Fatalf("Invalid value for --%s: %s (must be 'reject' or 'drop')", ServiceNoBackendResponse, c.ServiceNoBackendResponse) + } + c.populateLoadBalancerSettings(vp) c.populateDevices(vp) c.EnableRuntimeDeviceDetection = vp.GetBool(EnableRuntimeDeviceDetection) @@ -3269,38 +3194,7 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { } c.TCFilterPriority = uint16(tcFilterPrio) - c.Tunnel = vp.GetString(TunnelName) c.RoutingMode = vp.GetString(RoutingMode) - c.TunnelProtocol = vp.GetString(TunnelProtocol) - c.TunnelPort = vp.GetInt(TunnelPortName) - - if c.Tunnel != "" && c.RoutingMode != defaults.RoutingMode { - log.Fatalf("Option --%s cannot be used in combination with --%s", RoutingMode, TunnelName) - } - - if c.Tunnel == "disabled" { - c.RoutingMode = RoutingModeNative - } else if c.Tunnel != "" { - c.TunnelProtocol = c.Tunnel - } - c.Tunnel = "" - - if c.TunnelPort == 0 { - // manually pick port for native-routing and DSR with Geneve dispatch: - if !c.TunnelingEnabled() && - (c.EnableNodePort || (c.KubeProxyReplacement == KubeProxyReplacementStrict || c.KubeProxyReplacement == KubeProxyReplacementTrue)) && - c.NodePortMode != NodePortModeSNAT && - c.LoadBalancerDSRDispatch == DSRDispatchGeneve { - c.TunnelPort = defaults.TunnelPortGeneve - } else { - switch c.TunnelProtocol { - case TunnelVXLAN: - c.TunnelPort = defaults.TunnelPortVXLAN - case TunnelGeneve: - c.TunnelPort = defaults.TunnelPortGeneve - } - } - } if vp.IsSet(AddressScopeMax) { c.AddressScopeMax, err = ip.ParseScope(vp.GetString(AddressScopeMax)) @@ -3405,8 +3299,6 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.FQDNProxyResponseMaxDelay = vp.GetDuration(FQDNProxyResponseMaxDelay) c.DNSProxyConcurrencyLimit = vp.GetInt(DNSProxyConcurrencyLimit) c.DNSProxyConcurrencyProcessingGracePeriod = 
vp.GetDuration(DNSProxyConcurrencyProcessingGracePeriod) - c.DNSProxyLockCount = vp.GetInt(DNSProxyLockCount) - c.DNSProxyLockTimeout = vp.GetDuration(DNSProxyLockTimeout) c.FQDNRejectResponse = vp.GetString(FQDNRejectResponseCode) // Convert IP strings into net.IPNet types @@ -3519,10 +3411,6 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { log.Warningf("Running Cilium with %q=%q requires endpoint CRDs. Changing %s to %t", KVStore, c.KVStore, DisableCiliumEndpointCRDName, false) c.DisableCiliumEndpointCRD = false } - if c.K8sEventHandover { - log.Warningf("Running Cilium with %q=%q requires KVStore capability. Changing %s to %t", KVStore, c.KVStore, K8sEventHandover, false) - c.K8sEventHandover = false - } } switch c.IPAM { @@ -3540,7 +3428,10 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { } else { c.IPAMMultiPoolPreAllocation = m } - + if len(c.IPAMMultiPoolPreAllocation) == 0 { + // Default to the same value as IPAMDefaultIPPool + c.IPAMMultiPoolPreAllocation = map[string]string{c.IPAMDefaultIPPool: "8"} + } c.KubeProxyReplacementHealthzBindAddr = vp.GetString(KubeProxyReplacementHealthzBindAddr) // Hubble options. @@ -3597,6 +3488,8 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.HubbleExportFieldmask = vp.GetStringSlice(HubbleExportFieldmask) } + c.HubbleFlowlogsConfigFilePath = vp.GetString(HubbleFlowlogsConfigFilePath) + c.EnableHubbleRecorderAPI = vp.GetBool(EnableHubbleRecorderAPI) c.HubbleRecorderStoragePath = vp.GetString(HubbleRecorderStoragePath) c.HubbleRecorderSinkQueueSize = vp.GetInt(HubbleRecorderSinkQueueSize) @@ -3604,12 +3497,11 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.HubbleMonitorEvents = vp.GetStringSlice(HubbleMonitorEvents) c.HubbleRedactEnabled = vp.GetBool(HubbleRedactEnabled) c.HubbleRedactHttpURLQuery = vp.GetBool(HubbleRedactHttpURLQuery) + c.HubbleRedactHttpUserInfo = vp.GetBool(HubbleRedactHttpUserInfo) c.HubbleRedactKafkaApiKey = vp.GetBool(HubbleRedactKafkaApiKey) c.HubbleRedactHttpHeadersAllow = vp.GetStringSlice(HubbleRedactHttpHeadersAllow) c.HubbleRedactHttpHeadersDeny = vp.GetStringSlice(HubbleRedactHttpHeadersDeny) - c.DisableIptablesFeederRules = vp.GetStringSlice(DisableIptablesFeederRules) - // Hidden options c.CompilerFlags = vp.GetStringSlice(CompilerFlags) c.ConfigFile = vp.GetString(ConfigFile) @@ -3619,7 +3511,6 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.MaxControllerInterval = vp.GetInt(MaxCtrlIntervalName) c.PolicyQueueSize = sanitizeIntParam(vp, PolicyQueueSize, defaults.PolicyQueueSize) c.EndpointQueueSize = sanitizeIntParam(vp, EndpointQueueSize, defaults.EndpointQueueSize) - c.DisableCNPStatusUpdates = vp.GetBool(DisableCNPStatusUpdates) c.EnableICMPRules = vp.GetBool(EnableICMPRules) c.UseCiliumInternalIPForIPsec = vp.GetBool(UseCiliumInternalIPForIPsec) c.BypassIPAvailabilityUponRestore = vp.GetBool(BypassIPAvailabilityUponRestore) @@ -4417,3 +4308,13 @@ func parseBPFMapEventConfigs(confs BPFEventBufferConfigs, confMap map[string]str } return nil } + +func (d *DaemonConfig) EnforceLXCFibLookup() bool { + // See https://github.com/cilium/cilium/issues/27343 for the symptoms. + // + // We want to enforce FIB lookup if EndpointRoutes are enabled, because + // this was a config dependency change which caused different behaviour + // since v1.14.0-snapshot.2. We will remove this hack later, once we + // have auto-device detection on by default. 
+ return d.EnableEndpointRoutes +} diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/groups.go b/vendor/github.com/cilium/cilium/pkg/policy/api/groups.go index a9ad79e6ad..9edcab8bab 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/api/groups.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/api/groups.go @@ -7,9 +7,9 @@ import ( "context" "fmt" "net/netip" - "sync" "github.com/cilium/cilium/pkg/ip" + "github.com/cilium/cilium/pkg/lock" ) const ( @@ -17,7 +17,7 @@ const ( ) var ( - providers = sync.Map{} // map with the list of providers to callback to retrieve info from. + providers lock.Map[string, GroupProviderFunc] // map with the list of providers to callback to retrieve info from. ) // GroupProviderFunc is a func that need to be register to be able to @@ -50,14 +50,10 @@ func (group *ToGroups) GetCidrSet(ctx context.Context) ([]CIDRRule, error) { var addrs []netip.Addr // Get per provider CIDRSet if group.AWS != nil { - callbackInterface, ok := providers.Load(AWSProvider) + callback, ok := providers.Load(AWSProvider) if !ok { return nil, fmt.Errorf("Provider %s is not registered", AWSProvider) } - callback, ok := callbackInterface.(GroupProviderFunc) - if !ok { - return nil, fmt.Errorf("Provider callback for %s is not a valid instance", AWSProvider) - } awsAddrs, err := callback(ctx, group) if err != nil { return nil, fmt.Errorf( diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/utils.go b/vendor/github.com/cilium/cilium/pkg/policy/api/utils.go index 7424d34005..674f9c2508 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/api/utils.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/api/utils.go @@ -146,7 +146,7 @@ const ( ForceNamespace Option = iota ) -func ResourceQualifiedName(namespace, cecName, resourceName string, options ...Option) string { +func ResourceQualifiedName(namespace, cecName, resourceName string, options ...Option) (name string, updated bool) { forceNamespace := false for _, option := range options { switch option { @@ -157,7 +157,7 @@ func ResourceQualifiedName(namespace, cecName, resourceName string, options ...O idx := strings.IndexRune(resourceName, '/') if resourceName == "" || idx >= 0 && (!forceNamespace || (idx == len(namespace) && strings.HasPrefix(resourceName, namespace))) { - return resourceName + return resourceName, false } var sb strings.Builder @@ -168,5 +168,5 @@ func ResourceQualifiedName(namespace, cecName, resourceName string, options ...O sb.WriteRune('/') sb.WriteString(resourceName) - return sb.String() + return sb.String(), true } diff --git a/vendor/github.com/cilium/cilium/pkg/source/source.go b/vendor/github.com/cilium/cilium/pkg/source/source.go new file mode 100644 index 0000000000..4156105ab5 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/source/source.go @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package source + +// Source describes the source of a definition +type Source string + +const ( + // Unspec is used when the source is unspecified + Unspec Source = "unspec" + + // KubeAPIServer is the source used for state which represents the + // kube-apiserver, such as the IPs associated with it. This is not to be + // confused with the Kubernetes source. + // KubeAPIServer state has the strongest ownership and can only be + // overwritten by itself. + KubeAPIServer Source = "kube-apiserver" + + // Local is the source used for state derived from local agent state. + // Local state has the second strongest ownership, behind KubeAPIServer. 
+ Local Source = "local" + + // KVStore is the source used for state derived from a key value store. + // State in the key value stored takes precedence over orchestration + // system state such as Kubernetes. + KVStore Source = "kvstore" + + // CustomResource is the source used for state derived from Kubernetes + // custom resources + CustomResource Source = "custom-resource" + + // Kubernetes is the source used for state derived from Kubernetes + Kubernetes Source = "k8s" + + // LocalAPI is the source used for state derived from the API served + // locally on the node. + LocalAPI Source = "api" + + // Generated is the source used for generated state which can be + // overwritten by all other sources, except for restored (and unspec). + Generated Source = "generated" + + // Restored is the source used for restored state from data left behind + // by the previous agent instance. Can be overwritten by all other + // sources (except for unspec). + Restored Source = "restored" +) + +// AllowOverwrite returns true if new state from a particular source is allowed +// to overwrite existing state from another source +func AllowOverwrite(existing, new Source) bool { + switch existing { + + // KubeAPIServer state can only be overwritten by other kube-apiserver + // state. + case KubeAPIServer: + return new == KubeAPIServer + + // Local state can only be overwritten by other local state or + // kube-apiserver state. + case Local: + return new == Local || new == KubeAPIServer + + // KVStore can be overwritten by other kvstore, local state, or + // kube-apiserver state. + case KVStore: + return new == KVStore || new == Local || new == KubeAPIServer + + // Custom-resource state can be overwritten by other CRD, kvstore, + // local or kube-apiserver state. + case CustomResource: + return new == CustomResource || new == KVStore || new == Local || new == KubeAPIServer + + // Kubernetes state can be overwritten by everything except local API, + // generated, restored and unspecified state. 
+ case Kubernetes: + return new != LocalAPI && new != Generated && new != Restored && new != Unspec + + // Local API state can be overwritten by everything except restored, + // generated and unspecified state + case LocalAPI: + return new != Generated && new != Restored && new != Unspec + + // Generated can be overwritten by everything except by Restored and + // Unspecified + case Generated: + return new != Restored && new != Unspec + + // Restored can be overwritten by everything except by Unspecified + case Restored: + return new != Unspec + + // Unspecified state can be overwritten by everything + case Unspec: + return true + } + + return true +} diff --git a/vendor/github.com/cilium/cilium/pkg/stream/operators.go b/vendor/github.com/cilium/cilium/pkg/stream/operators.go index 694a9c82b1..c12e404008 100644 --- a/vendor/github.com/cilium/cilium/pkg/stream/operators.go +++ b/vendor/github.com/cilium/cilium/pkg/stream/operators.go @@ -216,7 +216,13 @@ func Debounce[T any](src Observable[T], duration time.Duration) Observable[T] { complete(err) return - case item := <-items: + case item, ok := <-items: + if !ok { + items = nil + latest = nil + continue + } + if timerElapsed { next(item) timerElapsed = false diff --git a/vendor/github.com/cilium/cilium/pkg/time/time.go b/vendor/github.com/cilium/cilium/pkg/time/time.go index da8c291c53..694b21f031 100644 --- a/vendor/github.com/cilium/cilium/pkg/time/time.go +++ b/vendor/github.com/cilium/cilium/pkg/time/time.go @@ -105,7 +105,7 @@ func NewTicker(d Duration) *time.Ticker { } // NewTimer overrides the stdlib time.NewTimer to enforce maximum sleepiness -// va option.MaxInternalTimerDelay. +// via option.MaxInternalTimerDelay. func NewTimer(d Duration) *time.Timer { if MaxInternalTimerDelay > 0 && d > MaxInternalTimerDelay { d = MaxInternalTimerDelay @@ -113,6 +113,13 @@ func NewTimer(d Duration) *time.Timer { return time.NewTimer(d) } +// NewTimerWithoutMaxDelay returns a time.NewTimer without enforcing maximum +// sleepiness. This function should only be used in cases where the timer firing +// early impacts correctness. If in doubt, you probably should use NewTimer. +func NewTimerWithoutMaxDelay(d Duration) *time.Timer { + return time.NewTimer(d) +} + // AfterFunc overrides the stdlib time.AfterFunc to enforce maximum sleepiness // via option.MaxInternalTimerDelay. func AfterFunc(d Duration, f func()) *time.Timer { diff --git a/vendor/github.com/cilium/ebpf/asm/alu.go b/vendor/github.com/cilium/ebpf/asm/alu.go index 7dc56204bc..282233d327 100644 --- a/vendor/github.com/cilium/ebpf/asm/alu.go +++ b/vendor/github.com/cilium/ebpf/asm/alu.go @@ -4,23 +4,23 @@ package asm // Source of ALU / ALU64 / Branch operations // -// msb lsb -// +----+-+---+ -// |op |S|cls| -// +----+-+---+ -type Source uint8 +// msb lsb +// +------------+-+---+ +// | op |S|cls| +// +------------+-+---+ +type Source uint16 -const sourceMask OpCode = 0x08 +const sourceMask OpCode = 0x0008 // Source bitmask const ( // InvalidSource is returned by getters when invoked // on non ALU / branch OpCodes. - InvalidSource Source = 0xff + InvalidSource Source = 0xffff // ImmSource src is from constant - ImmSource Source = 0x00 + ImmSource Source = 0x0000 // RegSource src is from register - RegSource Source = 0x08 + RegSource Source = 0x0008 ) // The Endianness of a byte swap instruction. 
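To make the ownership ordering encoded in AllowOverwrite above concrete, here is a minimal illustrative sketch (not part of this diff): only the package path and the AllowOverwrite signature come from the vendored file, the source pairs below are arbitrary examples.

package main

import (
	"fmt"

	"github.com/cilium/cilium/pkg/source"
)

func main() {
	// Each pair is (existing state source, candidate new source); the result
	// follows the precedence switch in AllowOverwrite.
	pairs := []struct{ existing, next source.Source }{
		{source.Kubernetes, source.KubeAPIServer}, // true: kube-apiserver state outranks k8s state
		{source.KubeAPIServer, source.Local},      // false: only kube-apiserver may overwrite itself
		{source.Restored, source.Kubernetes},      // true: restored state yields to live sources
		{source.Local, source.KVStore},            // false: kvstore does not outrank local state
	}
	for _, p := range pairs {
		fmt.Printf("AllowOverwrite(%s, %s) = %v\n",
			p.existing, p.next, source.AllowOverwrite(p.existing, p.next))
	}
}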
@@ -39,46 +39,56 @@ const ( // ALUOp are ALU / ALU64 operations // -// msb lsb -// +----+-+---+ -// |OP |s|cls| -// +----+-+---+ -type ALUOp uint8 +// msb lsb +// +-------+----+-+---+ +// | EXT | OP |s|cls| +// +-------+----+-+---+ +type ALUOp uint16 -const aluMask OpCode = 0xf0 +const aluMask OpCode = 0x3ff0 const ( // InvalidALUOp is returned by getters when invoked // on non ALU OpCodes - InvalidALUOp ALUOp = 0xff + InvalidALUOp ALUOp = 0xffff // Add - addition - Add ALUOp = 0x00 + Add ALUOp = 0x0000 // Sub - subtraction - Sub ALUOp = 0x10 + Sub ALUOp = 0x0010 // Mul - multiplication - Mul ALUOp = 0x20 + Mul ALUOp = 0x0020 // Div - division - Div ALUOp = 0x30 + Div ALUOp = 0x0030 + // SDiv - signed division + SDiv ALUOp = Div + 0x0100 // Or - bitwise or - Or ALUOp = 0x40 + Or ALUOp = 0x0040 // And - bitwise and - And ALUOp = 0x50 + And ALUOp = 0x0050 // LSh - bitwise shift left - LSh ALUOp = 0x60 + LSh ALUOp = 0x0060 // RSh - bitwise shift right - RSh ALUOp = 0x70 + RSh ALUOp = 0x0070 // Neg - sign/unsign signing bit - Neg ALUOp = 0x80 + Neg ALUOp = 0x0080 // Mod - modulo - Mod ALUOp = 0x90 + Mod ALUOp = 0x0090 + // SMod - signed modulo + SMod ALUOp = Mod + 0x0100 // Xor - bitwise xor - Xor ALUOp = 0xa0 + Xor ALUOp = 0x00a0 // Mov - move value from one place to another - Mov ALUOp = 0xb0 + Mov ALUOp = 0x00b0 + // MovSX8 - move lower 8 bits, sign extended upper bits of target + MovSX8 ALUOp = Mov + 0x0100 + // MovSX16 - move lower 16 bits, sign extended upper bits of target + MovSX16 ALUOp = Mov + 0x0200 + // MovSX32 - move lower 32 bits, sign extended upper bits of target + MovSX32 ALUOp = Mov + 0x0300 // ArSh - arithmetic shift - ArSh ALUOp = 0xc0 + ArSh ALUOp = 0x00c0 // Swap - endian conversions - Swap ALUOp = 0xd0 + Swap ALUOp = 0x00d0 ) // HostTo converts from host to another endianness. @@ -102,6 +112,27 @@ func HostTo(endian Endianness, dst Register, size Size) Instruction { } } +// BSwap unconditionally reverses the order of bytes in a register. +func BSwap(dst Register, size Size) Instruction { + var imm int64 + switch size { + case Half: + imm = 16 + case Word: + imm = 32 + case DWord: + imm = 64 + default: + return Instruction{OpCode: InvalidOpCode} + } + + return Instruction{ + OpCode: OpCode(ALU64Class).SetALUOp(Swap), + Dst: dst, + Constant: imm, + } +} + // Op returns the OpCode for an ALU operation with a given source. func (op ALUOp) Op(source Source) OpCode { return OpCode(ALU64Class).SetALUOp(op).SetSource(source) diff --git a/vendor/github.com/cilium/ebpf/asm/alu_string.go b/vendor/github.com/cilium/ebpf/asm/alu_string.go index 72d3fe6292..35b406bf3f 100644 --- a/vendor/github.com/cilium/ebpf/asm/alu_string.go +++ b/vendor/github.com/cilium/ebpf/asm/alu_string.go @@ -8,7 +8,7 @@ func _() { // An "invalid array index" compiler error signifies that the constant values have changed. // Re-run the stringer command to generate them again. var x [1]struct{} - _ = x[InvalidSource-255] + _ = x[InvalidSource-65535] _ = x[ImmSource-0] _ = x[RegSource-8] } @@ -25,7 +25,7 @@ func (i Source) String() string { return _Source_name_0 case i == 8: return _Source_name_1 - case i == 255: + case i == 65535: return _Source_name_2 default: return "Source(" + strconv.FormatInt(int64(i), 10) + ")" @@ -62,41 +62,51 @@ func _() { // An "invalid array index" compiler error signifies that the constant values have changed. // Re-run the stringer command to generate them again. 
var x [1]struct{} - _ = x[InvalidALUOp-255] + _ = x[InvalidALUOp-65535] _ = x[Add-0] _ = x[Sub-16] _ = x[Mul-32] _ = x[Div-48] + _ = x[SDiv-304] _ = x[Or-64] _ = x[And-80] _ = x[LSh-96] _ = x[RSh-112] _ = x[Neg-128] _ = x[Mod-144] + _ = x[SMod-400] _ = x[Xor-160] _ = x[Mov-176] + _ = x[MovSX8-432] + _ = x[MovSX16-688] + _ = x[MovSX32-944] _ = x[ArSh-192] _ = x[Swap-208] } -const _ALUOp_name = "AddSubMulDivOrAndLShRShNegModXorMovArShSwapInvalidALUOp" +const _ALUOp_name = "AddSubMulDivOrAndLShRShNegModXorMovArShSwapSDivSModMovSX8MovSX16MovSX32InvalidALUOp" var _ALUOp_map = map[ALUOp]string{ - 0: _ALUOp_name[0:3], - 16: _ALUOp_name[3:6], - 32: _ALUOp_name[6:9], - 48: _ALUOp_name[9:12], - 64: _ALUOp_name[12:14], - 80: _ALUOp_name[14:17], - 96: _ALUOp_name[17:20], - 112: _ALUOp_name[20:23], - 128: _ALUOp_name[23:26], - 144: _ALUOp_name[26:29], - 160: _ALUOp_name[29:32], - 176: _ALUOp_name[32:35], - 192: _ALUOp_name[35:39], - 208: _ALUOp_name[39:43], - 255: _ALUOp_name[43:55], + 0: _ALUOp_name[0:3], + 16: _ALUOp_name[3:6], + 32: _ALUOp_name[6:9], + 48: _ALUOp_name[9:12], + 64: _ALUOp_name[12:14], + 80: _ALUOp_name[14:17], + 96: _ALUOp_name[17:20], + 112: _ALUOp_name[20:23], + 128: _ALUOp_name[23:26], + 144: _ALUOp_name[26:29], + 160: _ALUOp_name[29:32], + 176: _ALUOp_name[32:35], + 192: _ALUOp_name[35:39], + 208: _ALUOp_name[39:43], + 304: _ALUOp_name[43:47], + 400: _ALUOp_name[47:51], + 432: _ALUOp_name[51:57], + 688: _ALUOp_name[57:64], + 944: _ALUOp_name[64:71], + 65535: _ALUOp_name[71:83], } func (i ALUOp) String() string { diff --git a/vendor/github.com/cilium/ebpf/asm/instruction.go b/vendor/github.com/cilium/ebpf/asm/instruction.go index ef01eaa35a..67cd39d6f6 100644 --- a/vendor/github.com/cilium/ebpf/asm/instruction.go +++ b/vendor/github.com/cilium/ebpf/asm/instruction.go @@ -60,6 +60,34 @@ func (ins *Instruction) Unmarshal(r io.Reader, bo binary.ByteOrder) (uint64, err } ins.Offset = int16(bo.Uint16(data[2:4])) + + if ins.OpCode.Class().IsALU() { + switch ins.OpCode.ALUOp() { + case Div: + if ins.Offset == 1 { + ins.OpCode = ins.OpCode.SetALUOp(SDiv) + ins.Offset = 0 + } + case Mod: + if ins.Offset == 1 { + ins.OpCode = ins.OpCode.SetALUOp(SMod) + ins.Offset = 0 + } + case Mov: + switch ins.Offset { + case 8: + ins.OpCode = ins.OpCode.SetALUOp(MovSX8) + ins.Offset = 0 + case 16: + ins.OpCode = ins.OpCode.SetALUOp(MovSX16) + ins.Offset = 0 + case 32: + ins.OpCode = ins.OpCode.SetALUOp(MovSX32) + ins.Offset = 0 + } + } + } + // Convert to int32 before widening to int64 // to ensure the signed bit is carried over. 
ins.Constant = int64(int32(bo.Uint32(data[4:8]))) @@ -106,8 +134,38 @@ func (ins Instruction) Marshal(w io.Writer, bo binary.ByteOrder) (uint64, error) return 0, fmt.Errorf("can't marshal registers: %s", err) } + if ins.OpCode.Class().IsALU() { + newOffset := int16(0) + switch ins.OpCode.ALUOp() { + case SDiv: + ins.OpCode = ins.OpCode.SetALUOp(Div) + newOffset = 1 + case SMod: + ins.OpCode = ins.OpCode.SetALUOp(Mod) + newOffset = 1 + case MovSX8: + ins.OpCode = ins.OpCode.SetALUOp(Mov) + newOffset = 8 + case MovSX16: + ins.OpCode = ins.OpCode.SetALUOp(Mov) + newOffset = 16 + case MovSX32: + ins.OpCode = ins.OpCode.SetALUOp(Mov) + newOffset = 32 + } + if newOffset != 0 && ins.Offset != 0 { + return 0, fmt.Errorf("extended ALU opcodes should have an .Offset of 0: %s", ins) + } + ins.Offset = newOffset + } + + op, err := ins.OpCode.bpfOpCode() + if err != nil { + return 0, err + } + data := make([]byte, InstructionSize) - data[0] = byte(ins.OpCode) + data[0] = op data[1] = byte(regs) bo.PutUint16(data[2:4], uint16(ins.Offset)) bo.PutUint32(data[4:8], uint32(cons)) @@ -298,9 +356,9 @@ func (ins Instruction) Format(f fmt.State, c rune) { goto ref } - fmt.Fprintf(f, "%v ", op) switch cls := op.Class(); { case cls.isLoadOrStore(): + fmt.Fprintf(f, "%v ", op) switch op.Mode() { case ImmMode: fmt.Fprintf(f, "dst: %s imm: %d", ins.Dst, ins.Constant) @@ -308,21 +366,30 @@ func (ins Instruction) Format(f fmt.State, c rune) { fmt.Fprintf(f, "imm: %d", ins.Constant) case IndMode: fmt.Fprintf(f, "dst: %s src: %s imm: %d", ins.Dst, ins.Src, ins.Constant) - case MemMode: + case MemMode, MemSXMode: fmt.Fprintf(f, "dst: %s src: %s off: %d imm: %d", ins.Dst, ins.Src, ins.Offset, ins.Constant) case XAddMode: fmt.Fprintf(f, "dst: %s src: %s", ins.Dst, ins.Src) } case cls.IsALU(): - fmt.Fprintf(f, "dst: %s ", ins.Dst) - if op.ALUOp() == Swap || op.Source() == ImmSource { + fmt.Fprintf(f, "%v", op) + if op == Swap.Op(ImmSource) { + fmt.Fprintf(f, "%d", ins.Constant) + } + + fmt.Fprintf(f, " dst: %s ", ins.Dst) + switch { + case op.ALUOp() == Swap: + break + case op.Source() == ImmSource: fmt.Fprintf(f, "imm: %d", ins.Constant) - } else { + default: fmt.Fprintf(f, "src: %s", ins.Src) } case cls.IsJump(): + fmt.Fprintf(f, "%v ", op) switch jop := op.JumpOp(); jop { case Call: switch ins.Src { @@ -336,6 +403,13 @@ func (ins Instruction) Format(f fmt.State, c rune) { fmt.Fprint(f, BuiltinFunc(ins.Constant)) } + case Ja: + if ins.OpCode.Class() == Jump32Class { + fmt.Fprintf(f, "imm: %d", ins.Constant) + } else { + fmt.Fprintf(f, "off: %d", ins.Offset) + } + default: fmt.Fprintf(f, "dst: %s off: %d ", ins.Dst, ins.Offset) if op.Source() == ImmSource { @@ -344,6 +418,8 @@ func (ins Instruction) Format(f fmt.State, c rune) { fmt.Fprintf(f, "src: %s", ins.Src) } } + default: + fmt.Fprintf(f, "%v ", op) } ref: @@ -772,7 +848,8 @@ func (insns Instructions) encodeFunctionReferences() error { } switch { - case ins.IsFunctionReference() && ins.Constant == -1: + case ins.IsFunctionReference() && ins.Constant == -1, + ins.OpCode == Ja.opCode(Jump32Class, ImmSource) && ins.Constant == -1: symOffset, ok := symbolOffsets[ins.Reference()] if !ok { return fmt.Errorf("%s at insn %d: symbol %q: %w", ins.OpCode, i, ins.Reference(), ErrUnsatisfiedProgramReference) diff --git a/vendor/github.com/cilium/ebpf/asm/jump.go b/vendor/github.com/cilium/ebpf/asm/jump.go index 9a525b21a5..2738d736b2 100644 --- a/vendor/github.com/cilium/ebpf/asm/jump.go +++ b/vendor/github.com/cilium/ebpf/asm/jump.go @@ -10,7 +10,7 @@ package asm // +----+-+---+ 
type JumpOp uint8 -const jumpMask OpCode = aluMask +const jumpMask OpCode = 0xf0 const ( // InvalidJumpOp is returned by getters when invoked @@ -103,13 +103,21 @@ func (op JumpOp) Reg32(dst, src Register, label string) Instruction { } func (op JumpOp) opCode(class Class, source Source) OpCode { - if op == Exit || op == Call || op == Ja { + if op == Exit || op == Call { return InvalidOpCode } return OpCode(class).SetJumpOp(op).SetSource(source) } +// LongJump returns a jump always instruction with a range of [-2^31, 2^31 - 1]. +func LongJump(label string) Instruction { + return Instruction{ + OpCode: Ja.opCode(Jump32Class, ImmSource), + Constant: -1, + }.WithReference(label) +} + // Label adjusts PC to the address of the label. func (op JumpOp) Label(label string) Instruction { if op == Call { diff --git a/vendor/github.com/cilium/ebpf/asm/load_store.go b/vendor/github.com/cilium/ebpf/asm/load_store.go index 574ee377c9..cdb5c5cfa4 100644 --- a/vendor/github.com/cilium/ebpf/asm/load_store.go +++ b/vendor/github.com/cilium/ebpf/asm/load_store.go @@ -24,6 +24,8 @@ const ( IndMode Mode = 0x40 // MemMode - load from memory MemMode Mode = 0x60 + // MemSXMode - load from memory, sign extension + MemSXMode Mode = 0x80 // XAddMode - add atomically across processors. XAddMode Mode = 0xc0 ) @@ -73,6 +75,11 @@ func LoadMemOp(size Size) OpCode { return OpCode(LdXClass).SetMode(MemMode).SetSize(size) } +// LoadMemSXOp returns the OpCode to load a value of given size from memory sign extended. +func LoadMemSXOp(size Size) OpCode { + return OpCode(LdXClass).SetMode(MemSXMode).SetSize(size) +} + // LoadMem emits `dst = *(size *)(src + offset)`. func LoadMem(dst, src Register, offset int16, size Size) Instruction { return Instruction{ @@ -83,6 +90,20 @@ func LoadMem(dst, src Register, offset int16, size Size) Instruction { } } +// LoadMemSX emits `dst = *(size *)(src + offset)` but sign extends dst. +func LoadMemSX(dst, src Register, offset int16, size Size) Instruction { + if size == DWord { + return Instruction{OpCode: InvalidOpCode} + } + + return Instruction{ + OpCode: LoadMemSXOp(size), + Dst: dst, + Src: src, + Offset: offset, + } +} + // LoadImmOp returns the OpCode to load an immediate of given size. // // As of kernel 4.20, only DWord size is accepted. 
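The alu.go, jump.go and load_store.go changes above add helpers for the newer BPF instruction-set additions (sign-extending loads and moves, unconditional byte swap, signed div/mod, and 32-bit "long" jumps). A small illustrative sketch of how the new helpers could be combined when hand-assembling an instruction stream (not taken from the diff; register choice and the "out" label are arbitrary):

package main

import (
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	insns := asm.Instructions{
		asm.LoadMemSX(asm.R0, asm.R1, 0, asm.Half), // r0 = sign-extended 16-bit load from r1+0
		asm.BSwap(asm.R0, asm.Half),                // unconditionally byte-swap the low 16 bits
		asm.LongJump("out"),                        // jump-always with a 32-bit range
		asm.Mov.Imm(asm.R0, 0),                     // skipped over by the long jump
		asm.Return().WithSymbol("out"),
	}
	fmt.Println(insns)
}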
diff --git a/vendor/github.com/cilium/ebpf/asm/load_store_string.go b/vendor/github.com/cilium/ebpf/asm/load_store_string.go index 76d29a0756..c48080327c 100644 --- a/vendor/github.com/cilium/ebpf/asm/load_store_string.go +++ b/vendor/github.com/cilium/ebpf/asm/load_store_string.go @@ -13,6 +13,7 @@ func _() { _ = x[AbsMode-32] _ = x[IndMode-64] _ = x[MemMode-96] + _ = x[MemSXMode-128] _ = x[XAddMode-192] } @@ -21,8 +22,9 @@ const ( _Mode_name_1 = "AbsMode" _Mode_name_2 = "IndMode" _Mode_name_3 = "MemMode" - _Mode_name_4 = "XAddMode" - _Mode_name_5 = "InvalidMode" + _Mode_name_4 = "MemSXMode" + _Mode_name_5 = "XAddMode" + _Mode_name_6 = "InvalidMode" ) func (i Mode) String() string { @@ -35,10 +37,12 @@ func (i Mode) String() string { return _Mode_name_2 case i == 96: return _Mode_name_3 - case i == 192: + case i == 128: return _Mode_name_4 - case i == 255: + case i == 192: return _Mode_name_5 + case i == 255: + return _Mode_name_6 default: return "Mode(" + strconv.FormatInt(int64(i), 10) + ")" } diff --git a/vendor/github.com/cilium/ebpf/asm/opcode.go b/vendor/github.com/cilium/ebpf/asm/opcode.go index 845c5521f8..1dfd0b171a 100644 --- a/vendor/github.com/cilium/ebpf/asm/opcode.go +++ b/vendor/github.com/cilium/ebpf/asm/opcode.go @@ -66,18 +66,43 @@ func (cls Class) isJumpOrALU() bool { return cls.IsJump() || cls.IsALU() } -// OpCode is a packed eBPF opcode. +// OpCode represents a single operation. +// It is not a 1:1 mapping to real eBPF opcodes. // -// Its encoding is defined by a Class value: +// The encoding varies based on a 3-bit Class: // -// msb lsb -// +----+-+---+ -// | ???? |CLS| -// +----+-+---+ -type OpCode uint8 +// 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 +// ??? | CLS +// +// For ALUClass and ALUCLass32: +// +// 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 +// OPC |S| CLS +// +// For LdClass, LdXclass, StClass and StXClass: +// +// 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 +// 0 | MDE |SIZ| CLS +// +// For JumpClass, Jump32Class: +// +// 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 +// 0 | OPC |S| CLS +type OpCode uint16 // InvalidOpCode is returned by setters on OpCode -const InvalidOpCode OpCode = 0xff +const InvalidOpCode OpCode = 0xffff + +// bpfOpCode returns the actual BPF opcode. +func (op OpCode) bpfOpCode() (byte, error) { + const opCodeMask = 0xff + + if !valid(op, opCodeMask) { + return 0, fmt.Errorf("invalid opcode %x", op) + } + + return byte(op & opCodeMask), nil +} // rawInstructions returns the number of BPF instructions required // to encode this opcode. @@ -147,7 +172,7 @@ func (op OpCode) JumpOp() JumpOp { jumpOp := JumpOp(op & jumpMask) // Some JumpOps are only supported by JumpClass, not Jump32Class. 
- if op.Class() == Jump32Class && (jumpOp == Exit || jumpOp == Call || jumpOp == Ja) { + if op.Class() == Jump32Class && (jumpOp == Exit || jumpOp == Call) { return InvalidJumpOp } @@ -234,17 +259,24 @@ func (op OpCode) String() string { } case class.IsALU(): + if op.ALUOp() == Swap && op.Class() == ALU64Class { + // B to make BSwap, uncontitional byte swap + f.WriteString("B") + } + f.WriteString(op.ALUOp().String()) if op.ALUOp() == Swap { - // Width for Endian is controlled by Constant - f.WriteString(op.Endianness().String()) + if op.Class() == ALUClass { + // Width for Endian is controlled by Constant + f.WriteString(op.Endianness().String()) + } } else { + f.WriteString(strings.TrimSuffix(op.Source().String(), "Source")) + if class == ALUClass { f.WriteString("32") } - - f.WriteString(strings.TrimSuffix(op.Source().String(), "Source")) } case class.IsJump(): @@ -254,7 +286,7 @@ func (op OpCode) String() string { f.WriteString("32") } - if jop := op.JumpOp(); jop != Exit && jop != Call { + if jop := op.JumpOp(); jop != Exit && jop != Call && jop != Ja { f.WriteString(strings.TrimSuffix(op.Source().String(), "Source")) } diff --git a/vendor/github.com/cilium/ebpf/btf/btf.go b/vendor/github.com/cilium/ebpf/btf/btf.go index a2ee2d1306..80f64d78ae 100644 --- a/vendor/github.com/cilium/ebpf/btf/btf.go +++ b/vendor/github.com/cilium/ebpf/btf/btf.go @@ -209,12 +209,7 @@ func loadRawSpec(btf io.ReaderAt, bo binary.ByteOrder, base *Spec) (*Spec, error } } - rawTypes, rawStrings, err := parseBTF(btf, bo, baseStrings) - if err != nil { - return nil, err - } - - types, err := inflateRawTypes(rawTypes, rawStrings, base) + types, rawStrings, err := parseBTF(btf, bo, baseStrings, base) if err != nil { return nil, err } @@ -322,12 +317,12 @@ func loadKernelSpec() (_ *Spec, fallback bool, _ error) { } defer file.Close() - spec, err := loadSpecFromELF(file) + spec, err := LoadSpecFromReader(file) return spec, true, err } // findVMLinux scans multiple well-known paths for vmlinux kernel images. -func findVMLinux() (*internal.SafeELFFile, error) { +func findVMLinux() (*os.File, error) { release, err := internal.KernelRelease() if err != nil { return nil, err @@ -346,7 +341,7 @@ func findVMLinux() (*internal.SafeELFFile, error) { } for _, loc := range locations { - file, err := internal.OpenSafeELFFile(fmt.Sprintf(loc, release)) + file, err := os.Open(fmt.Sprintf(loc, release)) if errors.Is(err, os.ErrNotExist) { continue } @@ -373,7 +368,7 @@ func guessRawBTFByteOrder(r io.ReaderAt) binary.ByteOrder { // parseBTF reads a .BTF section into memory and parses it into a list of // raw types and a string table. 
-func parseBTF(btf io.ReaderAt, bo binary.ByteOrder, baseStrings *stringTable) ([]rawType, *stringTable, error) { +func parseBTF(btf io.ReaderAt, bo binary.ByteOrder, baseStrings *stringTable, base *Spec) ([]Type, *stringTable, error) { buf := internal.NewBufferedSectionReader(btf, 0, math.MaxInt64) header, err := parseBTFHeader(buf, bo) if err != nil { @@ -387,12 +382,12 @@ func parseBTF(btf io.ReaderAt, bo binary.ByteOrder, baseStrings *stringTable) ([ } buf.Reset(io.NewSectionReader(btf, header.typeStart(), int64(header.TypeLen))) - rawTypes, err := readTypes(buf, bo, header.TypeLen) + types, err := readAndInflateTypes(buf, bo, header.TypeLen, rawStrings, base) if err != nil { - return nil, nil, fmt.Errorf("can't read types: %w", err) + return nil, nil, err } - return rawTypes, rawStrings, nil + return types, rawStrings, nil } type symbol struct { diff --git a/vendor/github.com/cilium/ebpf/btf/btf_types.go b/vendor/github.com/cilium/ebpf/btf/btf_types.go index c9984b2d44..f0e327abc0 100644 --- a/vendor/github.com/cilium/ebpf/btf/btf_types.go +++ b/vendor/github.com/cilium/ebpf/btf/btf_types.go @@ -153,6 +153,19 @@ type btfType struct { SizeType uint32 } +var btfTypeSize = int(unsafe.Sizeof(btfType{})) + +func unmarshalBtfType(bt *btfType, b []byte, bo binary.ByteOrder) (int, error) { + if len(b) < btfTypeSize { + return 0, fmt.Errorf("not enough bytes to unmarshal btfType") + } + + bt.NameOff = bo.Uint32(b[0:]) + bt.Info = bo.Uint32(b[4:]) + bt.SizeType = bo.Uint32(b[8:]) + return btfTypeSize, nil +} + func mask(len uint32) uint32 { return (1 << len) - 1 } @@ -300,6 +313,17 @@ const ( btfIntBitsShift = 0 ) +var btfIntLen = int(unsafe.Sizeof(btfInt{})) + +func unmarshalBtfInt(bi *btfInt, b []byte, bo binary.ByteOrder) (int, error) { + if len(b) < btfIntLen { + return 0, fmt.Errorf("not enough bytes to unmarshal btfInt") + } + + bi.Raw = bo.Uint32(b[0:]) + return btfIntLen, nil +} + func (bi btfInt) Encoding() IntEncoding { return IntEncoding(readBits(bi.Raw, btfIntEncodingLen, btfIntEncodingShift)) } @@ -330,102 +354,166 @@ type btfArray struct { Nelems uint32 } +var btfArrayLen = int(unsafe.Sizeof(btfArray{})) + +func unmarshalBtfArray(ba *btfArray, b []byte, bo binary.ByteOrder) (int, error) { + if len(b) < btfArrayLen { + return 0, fmt.Errorf("not enough bytes to unmarshal btfArray") + } + + ba.Type = TypeID(bo.Uint32(b[0:])) + ba.IndexType = TypeID(bo.Uint32(b[4:])) + ba.Nelems = bo.Uint32(b[8:]) + return btfArrayLen, nil +} + type btfMember struct { NameOff uint32 Type TypeID Offset uint32 } +var btfMemberLen = int(unsafe.Sizeof(btfMember{})) + +func unmarshalBtfMembers(members []btfMember, b []byte, bo binary.ByteOrder) (int, error) { + off := 0 + for i := range members { + if off+btfMemberLen > len(b) { + return 0, fmt.Errorf("not enough bytes to unmarshal btfMember %d", i) + } + + members[i].NameOff = bo.Uint32(b[off+0:]) + members[i].Type = TypeID(bo.Uint32(b[off+4:])) + members[i].Offset = bo.Uint32(b[off+8:]) + + off += btfMemberLen + } + + return off, nil +} + type btfVarSecinfo struct { Type TypeID Offset uint32 Size uint32 } +var btfVarSecinfoLen = int(unsafe.Sizeof(btfVarSecinfo{})) + +func unmarshalBtfVarSecInfos(secinfos []btfVarSecinfo, b []byte, bo binary.ByteOrder) (int, error) { + off := 0 + for i := range secinfos { + if off+btfVarSecinfoLen > len(b) { + return 0, fmt.Errorf("not enough bytes to unmarshal btfVarSecinfo %d", i) + } + + secinfos[i].Type = TypeID(bo.Uint32(b[off+0:])) + secinfos[i].Offset = bo.Uint32(b[off+4:]) + secinfos[i].Size = bo.Uint32(b[off+8:]) 
+ + off += btfVarSecinfoLen + } + + return off, nil +} + type btfVariable struct { Linkage uint32 } +var btfVariableLen = int(unsafe.Sizeof(btfVariable{})) + +func unmarshalBtfVariable(bv *btfVariable, b []byte, bo binary.ByteOrder) (int, error) { + if len(b) < btfVariableLen { + return 0, fmt.Errorf("not enough bytes to unmarshal btfVariable") + } + + bv.Linkage = bo.Uint32(b[0:]) + return btfVariableLen, nil +} + type btfEnum struct { NameOff uint32 Val uint32 } +var btfEnumLen = int(unsafe.Sizeof(btfEnum{})) + +func unmarshalBtfEnums(enums []btfEnum, b []byte, bo binary.ByteOrder) (int, error) { + off := 0 + for i := range enums { + if off+btfEnumLen > len(b) { + return 0, fmt.Errorf("not enough bytes to unmarshal btfEnum %d", i) + } + + enums[i].NameOff = bo.Uint32(b[off+0:]) + enums[i].Val = bo.Uint32(b[off+4:]) + + off += btfEnumLen + } + + return off, nil +} + type btfEnum64 struct { NameOff uint32 ValLo32 uint32 ValHi32 uint32 } +var btfEnum64Len = int(unsafe.Sizeof(btfEnum64{})) + +func unmarshalBtfEnums64(enums []btfEnum64, b []byte, bo binary.ByteOrder) (int, error) { + off := 0 + for i := range enums { + if off+btfEnum64Len > len(b) { + return 0, fmt.Errorf("not enough bytes to unmarshal btfEnum64 %d", i) + } + + enums[i].NameOff = bo.Uint32(b[off+0:]) + enums[i].ValLo32 = bo.Uint32(b[off+4:]) + enums[i].ValHi32 = bo.Uint32(b[off+8:]) + + off += btfEnum64Len + } + + return off, nil +} + type btfParam struct { NameOff uint32 Type TypeID } -type btfDeclTag struct { - ComponentIdx uint32 -} +var btfParamLen = int(unsafe.Sizeof(btfParam{})) -func readTypes(r io.Reader, bo binary.ByteOrder, typeLen uint32) ([]rawType, error) { - var header btfType - // because of the interleaving between types and struct members it is difficult to - // precompute the numbers of raw types this will parse - // this "guess" is a good first estimation - sizeOfbtfType := uintptr(btfTypeLen) - tyMaxCount := uintptr(typeLen) / sizeOfbtfType / 2 - types := make([]rawType, 0, tyMaxCount) - - for id := TypeID(1); ; id++ { - if err := binary.Read(r, bo, &header); err == io.EOF { - return types, nil - } else if err != nil { - return nil, fmt.Errorf("can't read type info for id %v: %v", id, err) +func unmarshalBtfParams(params []btfParam, b []byte, bo binary.ByteOrder) (int, error) { + off := 0 + for i := range params { + if off+btfParamLen > len(b) { + return 0, fmt.Errorf("not enough bytes to unmarshal btfParam %d", i) } - var data interface{} - switch header.Kind() { - case kindInt: - data = new(btfInt) - case kindPointer: - case kindArray: - data = new(btfArray) - case kindStruct: - fallthrough - case kindUnion: - data = make([]btfMember, header.Vlen()) - case kindEnum: - data = make([]btfEnum, header.Vlen()) - case kindForward: - case kindTypedef: - case kindVolatile: - case kindConst: - case kindRestrict: - case kindFunc: - case kindFuncProto: - data = make([]btfParam, header.Vlen()) - case kindVar: - data = new(btfVariable) - case kindDatasec: - data = make([]btfVarSecinfo, header.Vlen()) - case kindFloat: - case kindDeclTag: - data = new(btfDeclTag) - case kindTypeTag: - case kindEnum64: - data = make([]btfEnum64, header.Vlen()) - default: - return nil, fmt.Errorf("type id %v: unknown kind: %v", id, header.Kind()) - } + params[i].NameOff = bo.Uint32(b[off+0:]) + params[i].Type = TypeID(bo.Uint32(b[off+4:])) - if data == nil { - types = append(types, rawType{header, nil}) - continue - } + off += btfParamLen + } - if err := binary.Read(r, bo, data); err != nil { - return nil, fmt.Errorf("type id %d: kind 
%v: can't read %T: %v", id, header.Kind(), data, err) - } + return off, nil +} + +type btfDeclTag struct { + ComponentIdx uint32 +} - types = append(types, rawType{header, data}) +var btfDeclTagLen = int(unsafe.Sizeof(btfDeclTag{})) + +func unmarshalBtfDeclTag(bdt *btfDeclTag, b []byte, bo binary.ByteOrder) (int, error) { + if len(b) < btfDeclTagLen { + return 0, fmt.Errorf("not enough bytes to unmarshal btfDeclTag") } + + bdt.ComponentIdx = bo.Uint32(b[0:]) + return btfDeclTagLen, nil } diff --git a/vendor/github.com/cilium/ebpf/btf/core.go b/vendor/github.com/cilium/ebpf/btf/core.go index dec2cc7606..ded7d43d48 100644 --- a/vendor/github.com/cilium/ebpf/btf/core.go +++ b/vendor/github.com/cilium/ebpf/btf/core.go @@ -18,8 +18,8 @@ import ( // COREFixup is the result of computing a CO-RE relocation for a target. type COREFixup struct { kind coreKind - local uint32 - target uint32 + local uint64 + target uint64 // True if there is no valid fixup. The instruction is replaced with an // invalid dummy. poison bool @@ -196,12 +196,12 @@ func CORERelocate(relos []*CORERelocation, target *Spec, bo binary.ByteOrder) ([ result[i] = COREFixup{ kind: relo.kind, - local: uint32(relo.id), + local: uint64(relo.id), // NB: Using relo.id as the target here is incorrect, since // it doesn't match the BTF we generate on the fly. This isn't // too bad for now since there are no uses of the local type ID // in the kernel, yet. - target: uint32(relo.id), + target: uint64(relo.id), } continue } @@ -311,10 +311,10 @@ var errNoSignedness = errors.New("no signedness") // coreCalculateFixup calculates the fixup for a single local type, target type // and relocation. func coreCalculateFixup(relo *CORERelocation, target Type, targetID TypeID, bo binary.ByteOrder) (COREFixup, error) { - fixup := func(local, target uint32) (COREFixup, error) { + fixup := func(local, target uint64) (COREFixup, error) { return COREFixup{kind: relo.kind, local: local, target: target}, nil } - fixupWithoutValidation := func(local, target uint32) (COREFixup, error) { + fixupWithoutValidation := func(local, target uint64) (COREFixup, error) { return COREFixup{kind: relo.kind, local: local, target: target, skipLocalValidation: true}, nil } poison := func() (COREFixup, error) { @@ -346,7 +346,7 @@ func coreCalculateFixup(relo *CORERelocation, target Type, targetID TypeID, bo b return fixup(1, 1) case reloTypeIDTarget: - return fixup(uint32(relo.id), uint32(targetID)) + return fixup(uint64(relo.id), uint64(targetID)) case reloTypeSize: localSize, err := Sizeof(local) @@ -359,7 +359,7 @@ func coreCalculateFixup(relo *CORERelocation, target Type, targetID TypeID, bo b return zero, err } - return fixup(uint32(localSize), uint32(targetSize)) + return fixup(uint64(localSize), uint64(targetSize)) } case reloEnumvalValue, reloEnumvalExists: @@ -376,7 +376,7 @@ func coreCalculateFixup(relo *CORERelocation, target Type, targetID TypeID, bo b return fixup(1, 1) case reloEnumvalValue: - return fixup(uint32(localValue.Value), uint32(targetValue.Value)) + return fixup(localValue.Value, targetValue.Value) } case reloFieldByteOffset, reloFieldByteSize, reloFieldExists, reloFieldLShiftU64, reloFieldRShiftU64, reloFieldSigned: @@ -405,7 +405,7 @@ func coreCalculateFixup(relo *CORERelocation, target Type, targetID TypeID, bo b return fixup(1, 1) case reloFieldByteOffset: - return maybeSkipValidation(fixup(localField.offset, targetField.offset)) + return maybeSkipValidation(fixup(uint64(localField.offset), uint64(targetField.offset))) case reloFieldByteSize: 
localSize, err := Sizeof(localField.Type) @@ -417,24 +417,24 @@ func coreCalculateFixup(relo *CORERelocation, target Type, targetID TypeID, bo b if err != nil { return zero, err } - return maybeSkipValidation(fixup(uint32(localSize), uint32(targetSize))) + return maybeSkipValidation(fixup(uint64(localSize), uint64(targetSize))) case reloFieldLShiftU64: - var target uint32 + var target uint64 if bo == binary.LittleEndian { targetSize, err := targetField.sizeBits() if err != nil { return zero, err } - target = uint32(64 - targetField.bitfieldOffset - targetSize) + target = uint64(64 - targetField.bitfieldOffset - targetSize) } else { loadWidth, err := Sizeof(targetField.Type) if err != nil { return zero, err } - target = uint32(64 - Bits(loadWidth*8) + targetField.bitfieldOffset) + target = uint64(64 - Bits(loadWidth*8) + targetField.bitfieldOffset) } return fixupWithoutValidation(0, target) @@ -444,7 +444,7 @@ func coreCalculateFixup(relo *CORERelocation, target Type, targetID TypeID, bo b return zero, err } - return fixupWithoutValidation(0, uint32(64-targetSize)) + return fixupWithoutValidation(0, uint64(64-targetSize)) case reloFieldSigned: switch local := UnderlyingType(localField.Type).(type) { @@ -454,7 +454,7 @@ func coreCalculateFixup(relo *CORERelocation, target Type, targetID TypeID, bo b return zero, fmt.Errorf("target isn't *Enum but %T", targetField.Type) } - return fixup(boolToUint32(local.Signed), boolToUint32(target.Signed)) + return fixup(boolToUint64(local.Signed), boolToUint64(target.Signed)) case *Int: target, ok := as[*Int](targetField.Type) if !ok { @@ -462,8 +462,8 @@ func coreCalculateFixup(relo *CORERelocation, target Type, targetID TypeID, bo b } return fixup( - uint32(local.Encoding&Signed), - uint32(target.Encoding&Signed), + uint64(local.Encoding&Signed), + uint64(target.Encoding&Signed), ) default: return zero, fmt.Errorf("type %T: %w", local, errNoSignedness) @@ -474,7 +474,7 @@ func coreCalculateFixup(relo *CORERelocation, target Type, targetID TypeID, bo b return zero, ErrNotSupported } -func boolToUint32(val bool) uint32 { +func boolToUint64(val bool) uint64 { if val { return 1 } diff --git a/vendor/github.com/cilium/ebpf/btf/ext_info.go b/vendor/github.com/cilium/ebpf/btf/ext_info.go index 3eae08b28e..d85f45ae9d 100644 --- a/vendor/github.com/cilium/ebpf/btf/ext_info.go +++ b/vendor/github.com/cilium/ebpf/btf/ext_info.go @@ -556,21 +556,21 @@ func LoadLineInfos(reader io.Reader, bo binary.ByteOrder, recordNum uint32, spec return newLineInfos(lis, spec.strings) } -func newLineInfo(li bpfLineInfo, strings *stringTable) (*lineInfo, error) { +func newLineInfo(li bpfLineInfo, strings *stringTable) (lineInfo, error) { line, err := strings.Lookup(li.LineOff) if err != nil { - return nil, fmt.Errorf("lookup of line: %w", err) + return lineInfo{}, fmt.Errorf("lookup of line: %w", err) } fileName, err := strings.Lookup(li.FileNameOff) if err != nil { - return nil, fmt.Errorf("lookup of filename: %w", err) + return lineInfo{}, fmt.Errorf("lookup of filename: %w", err) } lineNumber := li.LineCol >> bpfLineShift lineColumn := li.LineCol & bpfColumnMax - return &lineInfo{ + return lineInfo{ &Line{ fileName, line, @@ -590,7 +590,7 @@ func newLineInfos(blis []bpfLineInfo, strings *stringTable) (LineInfos, error) { if err != nil { return LineInfos{}, fmt.Errorf("offset %d: %w", bli.InsnOff, err) } - lis.infos = append(lis.infos, *li) + lis.infos = append(lis.infos, li) } sort.Slice(lis.infos, func(i, j int) bool { return lis.infos[i].offset <= lis.infos[j].offset @@ -666,7 
+666,6 @@ func parseLineInfos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map // These records appear after a btf_ext_info_sec header in the line_info // sub-section of .BTF.ext. func parseLineInfoRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32, offsetInBytes bool) ([]bpfLineInfo, error) { - var out []bpfLineInfo var li bpfLineInfo if exp, got := uint32(binary.Size(li)), recordSize; exp != got { @@ -674,6 +673,7 @@ func parseLineInfoRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, r return nil, fmt.Errorf("expected LineInfo record size %d, but BTF blob contains %d", exp, got) } + out := make([]bpfLineInfo, 0, recordNum) for i := uint32(0); i < recordNum; i++ { if err := binary.Read(r, bo, &li); err != nil { return nil, fmt.Errorf("can't read line info: %v", err) diff --git a/vendor/github.com/cilium/ebpf/btf/strings.go b/vendor/github.com/cilium/ebpf/btf/strings.go index 0ddf1d24fb..114d250018 100644 --- a/vendor/github.com/cilium/ebpf/btf/strings.go +++ b/vendor/github.com/cilium/ebpf/btf/strings.go @@ -15,6 +15,7 @@ import ( type stringTable struct { base *stringTable offsets []uint32 + prevIdx int strings []string } @@ -61,7 +62,7 @@ func readStringTable(r sizedReader, base *stringTable) (*stringTable, error) { return nil, errors.New("first item in string table is non-empty") } - return &stringTable{base, offsets, strings}, nil + return &stringTable{base, offsets, 0, strings}, nil } func splitNull(data []byte, atEOF bool) (advance int, token []byte, err error) { @@ -84,26 +85,29 @@ func (st *stringTable) Lookup(offset uint32) (string, error) { } func (st *stringTable) lookup(offset uint32) (string, error) { + // Fast path: zero offset is the empty string, looked up frequently. + if offset == 0 && st.base == nil { + return "", nil + } + + // Accesses tend to be globally increasing, so check if the next string is + // the one we want. This skips the binary search in about 50% of cases. + if st.prevIdx+1 < len(st.offsets) && st.offsets[st.prevIdx+1] == offset { + st.prevIdx++ + return st.strings[st.prevIdx], nil + } + i, found := slices.BinarySearch(st.offsets, offset) if !found { return "", fmt.Errorf("offset %d isn't start of a string", offset) } - return st.strings[i], nil -} - -func (st *stringTable) Marshal(w io.Writer) error { - for _, str := range st.strings { - _, err := io.WriteString(w, str) - if err != nil { - return err - } - _, err = w.Write([]byte{0}) - if err != nil { - return err - } + // Set the new increment index, but only if its greater than the current. + if i > st.prevIdx+1 { + st.prevIdx = i } - return nil + + return st.strings[i], nil } // Num returns the number of strings in the table. diff --git a/vendor/github.com/cilium/ebpf/btf/types.go b/vendor/github.com/cilium/ebpf/btf/types.go index 8fe329aa4e..8bd7fc77fc 100644 --- a/vendor/github.com/cilium/ebpf/btf/types.go +++ b/vendor/github.com/cilium/ebpf/btf/types.go @@ -1,6 +1,7 @@ package btf import ( + "encoding/binary" "errors" "fmt" "io" @@ -10,6 +11,7 @@ import ( "github.com/cilium/ebpf/asm" "github.com/cilium/ebpf/internal" "github.com/cilium/ebpf/internal/sys" + "golang.org/x/exp/slices" ) // Mirrors MAX_RESOLVE_DEPTH in libbpf. 
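The strings.go hunk above swaps a plain binary search for a lookup that remembers the previously returned index, betting that BTF string offsets are requested in roughly increasing order. A standalone sketch of the same caching idea, with a hypothetical `cachedTable` type and the stdlib `slices` package (Go 1.21+) rather than the vendored implementation:

```go
package main

import (
	"fmt"
	"slices"
)

// cachedTable is a hypothetical stand-in for the vendored stringTable; only
// the prevIdx fast path is the point here.
type cachedTable struct {
	offsets []uint32
	strings []string
	prevIdx int
}

func (t *cachedTable) lookup(offset uint32) (string, bool) {
	// Offsets are usually requested in increasing order, so try the entry
	// right after the previous hit before falling back to binary search.
	if t.prevIdx+1 < len(t.offsets) && t.offsets[t.prevIdx+1] == offset {
		t.prevIdx++
		return t.strings[t.prevIdx], true
	}
	i, found := slices.BinarySearch(t.offsets, offset)
	if !found {
		return "", false
	}
	// Only move the cached index forward.
	if i > t.prevIdx+1 {
		t.prevIdx = i
	}
	return t.strings[i], true
}

func main() {
	t := &cachedTable{offsets: []uint32{0, 4, 9}, strings: []string{"", "int", "char"}}
	fmt.Println(t.lookup(4)) // int true (fast path: 4 directly follows the initial index)
	fmt.Println(t.lookup(9)) // char true (fast path again)
}
```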
@@ -117,7 +119,7 @@ type Int struct { } func (i *Int) Format(fs fmt.State, verb rune) { - formatType(fs, verb, i, i.Encoding, "size=", i.Size*8) + formatType(fs, verb, i, i.Encoding, "size=", i.Size) } func (i *Int) TypeName() string { return i.Name } @@ -726,17 +728,22 @@ func (c *copier) copy(typ *Type, transform Transformer) { type typeDeque = internal.Deque[*Type] -// inflateRawTypes takes a list of raw btf types linked via type IDs, and turns -// it into a graph of Types connected via pointers. +// readAndInflateTypes reads the raw btf type info and turns it into a graph +// of Types connected via pointers. // -// If base is provided, then the raw types are considered to be of a split BTF +// If base is provided, then the types are considered to be of a split BTF // (e.g., a kernel module). // // Returns a slice of types indexed by TypeID. Since BTF ignores compilation // units, multiple types may share the same name. A Type may form a cyclic graph // by pointing at itself. -func inflateRawTypes(rawTypes []rawType, rawStrings *stringTable, base *Spec) ([]Type, error) { - types := make([]Type, 0, len(rawTypes)+1) // +1 for Void added to base types +func readAndInflateTypes(r io.Reader, bo binary.ByteOrder, typeLen uint32, rawStrings *stringTable, base *Spec) ([]Type, error) { + // because of the interleaving between types and struct members it is difficult to + // precompute the numbers of raw types this will parse + // this "guess" is a good first estimation + sizeOfbtfType := uintptr(btfTypeLen) + tyMaxCount := uintptr(typeLen) / sizeOfbtfType / 2 + types := make([]Type, 0, tyMaxCount) // Void is defined to always be type ID 0, and is thus omitted from BTF. types = append(types, (*Void)(nil)) @@ -841,62 +848,128 @@ func inflateRawTypes(rawTypes []rawType, rawStrings *stringTable, base *Spec) ([ return members, nil } + var ( + buf = make([]byte, 1024) + header btfType + bInt btfInt + bArr btfArray + bMembers []btfMember + bEnums []btfEnum + bParams []btfParam + bVariable btfVariable + bSecInfos []btfVarSecinfo + bDeclTag btfDeclTag + bEnums64 []btfEnum64 + ) + var declTags []*declTag - for _, raw := range rawTypes { + for { var ( id = firstTypeID + TypeID(len(types)) typ Type ) + if _, err := io.ReadFull(r, buf[:btfTypeLen]); err == io.EOF { + break + } else if err != nil { + return nil, fmt.Errorf("can't read type info for id %v: %v", id, err) + } + + if _, err := unmarshalBtfType(&header, buf[:btfTypeLen], bo); err != nil { + return nil, fmt.Errorf("can't unmarshal type info for id %v: %v", id, err) + } + if id < firstTypeID { return nil, fmt.Errorf("no more type IDs") } - name, err := rawStrings.Lookup(raw.NameOff) + name, err := rawStrings.Lookup(header.NameOff) if err != nil { return nil, fmt.Errorf("get name for type id %d: %w", id, err) } - switch raw.Kind() { + switch header.Kind() { case kindInt: - size := raw.Size() - bi := raw.data.(*btfInt) - if bi.Offset() > 0 || bi.Bits().Bytes() != size { - legacyBitfields[id] = [2]Bits{bi.Offset(), bi.Bits()} + size := header.Size() + buf = buf[:btfIntLen] + if _, err := io.ReadFull(r, buf); err != nil { + return nil, fmt.Errorf("can't read btfInt, id: %d: %w", id, err) + } + if _, err := unmarshalBtfInt(&bInt, buf, bo); err != nil { + return nil, fmt.Errorf("can't unmarshal btfInt, id: %d: %w", id, err) + } + if bInt.Offset() > 0 || bInt.Bits().Bytes() != size { + legacyBitfields[id] = [2]Bits{bInt.Offset(), bInt.Bits()} } - typ = &Int{name, raw.Size(), bi.Encoding()} + typ = &Int{name, header.Size(), bInt.Encoding()} case 
kindPointer: ptr := &Pointer{nil} - fixup(raw.Type(), &ptr.Target) + fixup(header.Type(), &ptr.Target) typ = ptr case kindArray: - btfArr := raw.data.(*btfArray) - arr := &Array{nil, nil, btfArr.Nelems} - fixup(btfArr.IndexType, &arr.Index) - fixup(btfArr.Type, &arr.Type) + buf = buf[:btfArrayLen] + if _, err := io.ReadFull(r, buf); err != nil { + return nil, fmt.Errorf("can't read btfArray, id: %d: %w", id, err) + } + if _, err := unmarshalBtfArray(&bArr, buf, bo); err != nil { + return nil, fmt.Errorf("can't unmarshal btfArray, id: %d: %w", id, err) + } + + arr := &Array{nil, nil, bArr.Nelems} + fixup(bArr.IndexType, &arr.Index) + fixup(bArr.Type, &arr.Type) typ = arr case kindStruct: - members, err := convertMembers(raw.data.([]btfMember), raw.Bitfield()) + vlen := header.Vlen() + bMembers = slices.Grow(bMembers[:0], vlen)[:vlen] + buf = slices.Grow(buf[:0], vlen*btfMemberLen)[:vlen*btfMemberLen] + if _, err := io.ReadFull(r, buf); err != nil { + return nil, fmt.Errorf("can't read btfMembers, id: %d: %w", id, err) + } + if _, err := unmarshalBtfMembers(bMembers, buf, bo); err != nil { + return nil, fmt.Errorf("can't unmarshal btfMembers, id: %d: %w", id, err) + } + + members, err := convertMembers(bMembers, header.Bitfield()) if err != nil { return nil, fmt.Errorf("struct %s (id %d): %w", name, id, err) } - typ = &Struct{name, raw.Size(), members} + typ = &Struct{name, header.Size(), members} case kindUnion: - members, err := convertMembers(raw.data.([]btfMember), raw.Bitfield()) + vlen := header.Vlen() + bMembers = slices.Grow(bMembers[:0], vlen)[:vlen] + buf = slices.Grow(buf[:0], vlen*btfMemberLen)[:vlen*btfMemberLen] + if _, err := io.ReadFull(r, buf); err != nil { + return nil, fmt.Errorf("can't read btfMembers, id: %d: %w", id, err) + } + if _, err := unmarshalBtfMembers(bMembers, buf, bo); err != nil { + return nil, fmt.Errorf("can't unmarshal btfMembers, id: %d: %w", id, err) + } + + members, err := convertMembers(bMembers, header.Bitfield()) if err != nil { return nil, fmt.Errorf("union %s (id %d): %w", name, id, err) } - typ = &Union{name, raw.Size(), members} + typ = &Union{name, header.Size(), members} case kindEnum: - rawvals := raw.data.([]btfEnum) - vals := make([]EnumValue, 0, len(rawvals)) - signed := raw.Signed() - for i, btfVal := range rawvals { + vlen := header.Vlen() + bEnums = slices.Grow(bEnums[:0], vlen)[:vlen] + buf = slices.Grow(buf[:0], vlen*btfEnumLen)[:vlen*btfEnumLen] + if _, err := io.ReadFull(r, buf); err != nil { + return nil, fmt.Errorf("can't read btfEnums, id: %d: %w", id, err) + } + if _, err := unmarshalBtfEnums(bEnums, buf, bo); err != nil { + return nil, fmt.Errorf("can't unmarshal btfEnums, id: %d: %w", id, err) + } + + vals := make([]EnumValue, 0, vlen) + signed := header.Signed() + for i, btfVal := range bEnums { name, err := rawStrings.Lookup(btfVal.NameOff) if err != nil { return nil, fmt.Errorf("get name for enum value %d: %s", i, err) @@ -908,40 +981,49 @@ func inflateRawTypes(rawTypes []rawType, rawStrings *stringTable, base *Spec) ([ } vals = append(vals, EnumValue{name, value}) } - typ = &Enum{name, raw.Size(), signed, vals} + typ = &Enum{name, header.Size(), signed, vals} case kindForward: - typ = &Fwd{name, raw.FwdKind()} + typ = &Fwd{name, header.FwdKind()} case kindTypedef: typedef := &Typedef{name, nil} - fixup(raw.Type(), &typedef.Type) + fixup(header.Type(), &typedef.Type) typ = typedef case kindVolatile: volatile := &Volatile{nil} - fixup(raw.Type(), &volatile.Type) + fixup(header.Type(), &volatile.Type) typ = volatile case 
kindConst: cnst := &Const{nil} - fixup(raw.Type(), &cnst.Type) + fixup(header.Type(), &cnst.Type) typ = cnst case kindRestrict: restrict := &Restrict{nil} - fixup(raw.Type(), &restrict.Type) + fixup(header.Type(), &restrict.Type) typ = restrict case kindFunc: - fn := &Func{name, nil, raw.Linkage()} - fixup(raw.Type(), &fn.Type) + fn := &Func{name, nil, header.Linkage()} + fixup(header.Type(), &fn.Type) typ = fn case kindFuncProto: - rawparams := raw.data.([]btfParam) - params := make([]FuncParam, 0, len(rawparams)) - for i, param := range rawparams { + vlen := header.Vlen() + bParams = slices.Grow(bParams[:0], vlen)[:vlen] + buf = slices.Grow(buf[:0], vlen*btfParamLen)[:vlen*btfParamLen] + if _, err := io.ReadFull(r, buf); err != nil { + return nil, fmt.Errorf("can't read btfParams, id: %d: %w", id, err) + } + if _, err := unmarshalBtfParams(bParams, buf, bo); err != nil { + return nil, fmt.Errorf("can't unmarshal btfParams, id: %d: %w", id, err) + } + + params := make([]FuncParam, 0, vlen) + for i, param := range bParams { name, err := rawStrings.Lookup(param.NameOff) if err != nil { return nil, fmt.Errorf("get name for func proto parameter %d: %s", i, err) @@ -951,57 +1033,90 @@ func inflateRawTypes(rawTypes []rawType, rawStrings *stringTable, base *Spec) ([ }) } for i := range params { - fixup(rawparams[i].Type, ¶ms[i].Type) + fixup(bParams[i].Type, ¶ms[i].Type) } fp := &FuncProto{nil, params} - fixup(raw.Type(), &fp.Return) + fixup(header.Type(), &fp.Return) typ = fp case kindVar: - variable := raw.data.(*btfVariable) - v := &Var{name, nil, VarLinkage(variable.Linkage)} - fixup(raw.Type(), &v.Type) + buf = buf[:btfVariableLen] + if _, err := io.ReadFull(r, buf); err != nil { + return nil, fmt.Errorf("can't read btfVariable, id: %d: %w", id, err) + } + if _, err := unmarshalBtfVariable(&bVariable, buf, bo); err != nil { + return nil, fmt.Errorf("can't read btfVariable, id: %d: %w", id, err) + } + + v := &Var{name, nil, VarLinkage(bVariable.Linkage)} + fixup(header.Type(), &v.Type) typ = v case kindDatasec: - btfVars := raw.data.([]btfVarSecinfo) - vars := make([]VarSecinfo, 0, len(btfVars)) - for _, btfVar := range btfVars { + vlen := header.Vlen() + bSecInfos = slices.Grow(bSecInfos[:0], vlen)[:vlen] + buf = slices.Grow(buf[:0], vlen*btfVarSecinfoLen)[:vlen*btfVarSecinfoLen] + if _, err := io.ReadFull(r, buf); err != nil { + return nil, fmt.Errorf("can't read btfVarSecInfos, id: %d: %w", id, err) + } + if _, err := unmarshalBtfVarSecInfos(bSecInfos, buf, bo); err != nil { + return nil, fmt.Errorf("can't unmarshal btfVarSecInfos, id: %d: %w", id, err) + } + + vars := make([]VarSecinfo, 0, vlen) + for _, btfVar := range bSecInfos { vars = append(vars, VarSecinfo{ Offset: btfVar.Offset, Size: btfVar.Size, }) } for i := range vars { - fixup(btfVars[i].Type, &vars[i].Type) + fixup(bSecInfos[i].Type, &vars[i].Type) } - typ = &Datasec{name, raw.Size(), vars} + typ = &Datasec{name, header.Size(), vars} case kindFloat: - typ = &Float{name, raw.Size()} + typ = &Float{name, header.Size()} case kindDeclTag: - btfIndex := raw.data.(*btfDeclTag).ComponentIdx + buf = buf[:btfDeclTagLen] + if _, err := io.ReadFull(r, buf); err != nil { + return nil, fmt.Errorf("can't read btfDeclTag, id: %d: %w", id, err) + } + if _, err := unmarshalBtfDeclTag(&bDeclTag, buf, bo); err != nil { + return nil, fmt.Errorf("can't read btfDeclTag, id: %d: %w", id, err) + } + + btfIndex := bDeclTag.ComponentIdx if uint64(btfIndex) > math.MaxInt { return nil, fmt.Errorf("type id %d: index exceeds int", id) } dt := 
&declTag{nil, name, int(int32(btfIndex))} - fixup(raw.Type(), &dt.Type) + fixup(header.Type(), &dt.Type) typ = dt declTags = append(declTags, dt) case kindTypeTag: tt := &typeTag{nil, name} - fixup(raw.Type(), &tt.Type) + fixup(header.Type(), &tt.Type) typ = tt case kindEnum64: - rawvals := raw.data.([]btfEnum64) - vals := make([]EnumValue, 0, len(rawvals)) - for i, btfVal := range rawvals { + vlen := header.Vlen() + bEnums64 = slices.Grow(bEnums64[:0], vlen)[:vlen] + buf = slices.Grow(buf[:0], vlen*btfEnum64Len)[:vlen*btfEnum64Len] + if _, err := io.ReadFull(r, buf); err != nil { + return nil, fmt.Errorf("can't read btfEnum64s, id: %d: %w", id, err) + } + if _, err := unmarshalBtfEnums64(bEnums64, buf, bo); err != nil { + return nil, fmt.Errorf("can't unmarshal btfEnum64s, id: %d: %w", id, err) + } + + vals := make([]EnumValue, 0, vlen) + for i, btfVal := range bEnums64 { name, err := rawStrings.Lookup(btfVal.NameOff) if err != nil { return nil, fmt.Errorf("get name for enum64 value %d: %s", i, err) @@ -1009,10 +1124,10 @@ func inflateRawTypes(rawTypes []rawType, rawStrings *stringTable, base *Spec) ([ value := (uint64(btfVal.ValHi32) << 32) | uint64(btfVal.ValLo32) vals = append(vals, EnumValue{name, value}) } - typ = &Enum{name, raw.Size(), raw.Signed(), vals} + typ = &Enum{name, header.Size(), header.Signed(), vals} default: - return nil, fmt.Errorf("type id %d: unknown kind: %v", id, raw.Kind()) + return nil, fmt.Errorf("type id %d: unknown kind: %v", id, header.Kind()) } types = append(types, typ) diff --git a/vendor/github.com/cilium/ebpf/collection.go b/vendor/github.com/cilium/ebpf/collection.go index a581ecf44f..a5532220fd 100644 --- a/vendor/github.com/cilium/ebpf/collection.go +++ b/vendor/github.com/cilium/ebpf/collection.go @@ -626,17 +626,20 @@ func resolveKconfig(m *MapSpec) error { internal.NativeEndian.PutUint32(data[vsi.Offset:], kv.Kernel()) case "LINUX_HAS_SYSCALL_WRAPPER": - if integer, ok := v.Type.(*btf.Int); !ok || integer.Size != 4 { - return fmt.Errorf("variable %s must be a 32 bits integer, got %s", n, v.Type) + integer, ok := v.Type.(*btf.Int) + if !ok { + return fmt.Errorf("variable %s must be an integer, got %s", n, v.Type) } - var value uint32 = 1 + var value uint64 = 1 if err := haveSyscallWrapper(); errors.Is(err, ErrNotSupported) { value = 0 } else if err != nil { return fmt.Errorf("unable to derive a value for LINUX_HAS_SYSCALL_WRAPPER: %w", err) } - internal.NativeEndian.PutUint32(data[vsi.Offset:], value) + if err := kconfig.PutInteger(data[vsi.Offset:], integer, value); err != nil { + return fmt.Errorf("set LINUX_HAS_SYSCALL_WRAPPER: %w", err) + } default: // Catch CONFIG_*. configs[n] = configInfo{ @@ -695,6 +698,71 @@ func LoadCollection(file string) (*Collection, error) { return NewCollection(spec) } +// Assign the contents of a Collection to a struct. +// +// This function bridges functionality between bpf2go generated +// code and any functionality better implemented in Collection. +// +// 'to' must be a pointer to a struct. A field of the +// struct is updated with values from Programs or Maps if it +// has an `ebpf` tag and its type is *Program or *Map. +// The tag's value specifies the name of the program or map as +// found in the CollectionSpec. +// +// struct { +// Foo *ebpf.Program `ebpf:"xdp_foo"` +// Bar *ebpf.Map `ebpf:"bar_map"` +// Ignored int +// } +// +// Returns an error if any of the eBPF objects can't be found, or +// if the same Map or Program is assigned multiple times. 
+// +// Ownership and Close()ing responsibility is transferred to `to` +// for any successful assigns. On error `to` is left in an undefined state. +func (coll *Collection) Assign(to interface{}) error { + assignedMaps := make(map[string]bool) + assignedProgs := make(map[string]bool) + + // Assign() only transfers already-loaded Maps and Programs. No extra + // loading is done. + getValue := func(typ reflect.Type, name string) (interface{}, error) { + switch typ { + + case reflect.TypeOf((*Program)(nil)): + if p := coll.Programs[name]; p != nil { + assignedProgs[name] = true + return p, nil + } + return nil, fmt.Errorf("missing program %q", name) + + case reflect.TypeOf((*Map)(nil)): + if m := coll.Maps[name]; m != nil { + assignedMaps[name] = true + return m, nil + } + return nil, fmt.Errorf("missing map %q", name) + + default: + return nil, fmt.Errorf("unsupported type %s", typ) + } + } + + if err := assignValues(to, getValue); err != nil { + return err + } + + // Finalize ownership transfer + for p := range assignedProgs { + delete(coll.Programs, p) + } + for m := range assignedMaps { + delete(coll.Maps, m) + } + + return nil +} + // Close frees all maps and programs associated with the collection. // // The collection mustn't be used afterwards. diff --git a/vendor/github.com/cilium/ebpf/elf_reader.go b/vendor/github.com/cilium/ebpf/elf_reader.go index 5e0bb98ea1..5a85cbc24f 100644 --- a/vendor/github.com/cilium/ebpf/elf_reader.go +++ b/vendor/github.com/cilium/ebpf/elf_reader.go @@ -373,7 +373,7 @@ func (ec *elfCode) loadFunctions(section *elfSection) (map[string]asm.Instructio r := bufio.NewReader(section.Open()) // Decode the section's instruction stream. - var insns asm.Instructions + insns := make(asm.Instructions, 0, section.Size/asm.InstructionSize) if err := insns.Unmarshal(r, ec.ByteOrder); err != nil { return nil, fmt.Errorf("decoding instructions for section %s: %w", section.Name, err) } @@ -467,10 +467,14 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err switch target.kind { case mapSection, btfMapSection: - if bind != elf.STB_GLOBAL { + if bind == elf.STB_LOCAL { return fmt.Errorf("possible erroneous static qualifier on map definition: found reference to %q", name) } + if bind != elf.STB_GLOBAL { + return fmt.Errorf("map %q: unsupported relocation %s", name, bind) + } + if typ != elf.STT_OBJECT && typ != elf.STT_NOTYPE { // STT_NOTYPE is generated on clang < 8 which doesn't tag // relocations appropriately. diff --git a/vendor/github.com/cilium/ebpf/internal/kconfig/kconfig.go b/vendor/github.com/cilium/ebpf/internal/kconfig/kconfig.go index d95e7eb0e5..fa53085782 100644 --- a/vendor/github.com/cilium/ebpf/internal/kconfig/kconfig.go +++ b/vendor/github.com/cilium/ebpf/internal/kconfig/kconfig.go @@ -250,7 +250,20 @@ func putValueNumber(data []byte, typ btf.Type, value string) error { return fmt.Errorf("cannot parse value: %w", err) } - switch size { + return PutInteger(data, integer, n) +} + +// PutInteger writes n into data. +// +// integer determines how much is written into data and what the valid values +// are. +func PutInteger(data []byte, integer *btf.Int, n uint64) error { + // This function should match set_kcfg_value_num in libbpf. 
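The new `Collection.Assign` method added above is easiest to see with a usage sketch. This is illustrative only: the object file name and the `objects` struct are hypothetical, and the `xdp_foo`/`bar_map` names are simply taken from the example struct in the doc comment.

```go
package main

import (
	"log"

	"github.com/cilium/ebpf"
)

type objects struct {
	Foo *ebpf.Program `ebpf:"xdp_foo"`
	Bar *ebpf.Map     `ebpf:"bar_map"`
}

func main() {
	coll, err := ebpf.LoadCollection("bpf_prog.o") // hypothetical object file
	if err != nil {
		log.Fatal(err)
	}
	defer coll.Close()

	var objs objects
	// On success, ownership of Foo and Bar moves to objs and the entries are
	// removed from coll, so coll.Close no longer closes them.
	if err := coll.Assign(&objs); err != nil {
		log.Fatal(err)
	}
	defer objs.Foo.Close()
	defer objs.Bar.Close()
}
```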
+ if integer.Encoding == btf.Bool && n > 1 { + return fmt.Errorf("invalid boolean value: %d", n) + } + + switch integer.Size { case 1: data[0] = byte(n) case 2: @@ -260,7 +273,7 @@ func putValueNumber(data []byte, typ btf.Type, value string) error { case 8: internal.NativeEndian.PutUint64(data, uint64(n)) default: - return fmt.Errorf("size (%d) is not valid, expected: 1, 2, 4 or 8", size) + return fmt.Errorf("size (%d) is not valid, expected: 1, 2, 4 or 8", integer.Size) } return nil diff --git a/vendor/github.com/cilium/ebpf/internal/sys/types.go b/vendor/github.com/cilium/ebpf/internal/sys/types.go index b7e3244adf..51698e06b4 100644 --- a/vendor/github.com/cilium/ebpf/internal/sys/types.go +++ b/vendor/github.com/cilium/ebpf/internal/sys/types.go @@ -60,7 +60,12 @@ const ( BPF_PERF_EVENT AttachType = 41 BPF_TRACE_KPROBE_MULTI AttachType = 42 BPF_LSM_CGROUP AttachType = 43 - __MAX_BPF_ATTACH_TYPE AttachType = 44 + BPF_STRUCT_OPS AttachType = 44 + BPF_NETFILTER AttachType = 45 + BPF_TCX_INGRESS AttachType = 46 + BPF_TCX_EGRESS AttachType = 47 + BPF_TRACE_UPROBE_MULTI AttachType = 48 + __MAX_BPF_ATTACH_TYPE AttachType = 49 ) type Cmd uint32 @@ -318,7 +323,9 @@ const ( BPF_FUNC_tcp_raw_check_syncookie_ipv6 FunctionId = 207 BPF_FUNC_ktime_get_tai_ns FunctionId = 208 BPF_FUNC_user_ringbuf_drain FunctionId = 209 - __BPF_FUNC_MAX_ID FunctionId = 210 + BPF_FUNC_cgrp_storage_get FunctionId = 210 + BPF_FUNC_cgrp_storage_delete FunctionId = 211 + __BPF_FUNC_MAX_ID FunctionId = 212 ) type HdrStartOff uint32 @@ -341,44 +348,49 @@ const ( BPF_LINK_TYPE_PERF_EVENT LinkType = 7 BPF_LINK_TYPE_KPROBE_MULTI LinkType = 8 BPF_LINK_TYPE_STRUCT_OPS LinkType = 9 - MAX_BPF_LINK_TYPE LinkType = 10 + BPF_LINK_TYPE_NETFILTER LinkType = 10 + BPF_LINK_TYPE_TCX LinkType = 11 + BPF_LINK_TYPE_UPROBE_MULTI LinkType = 12 + MAX_BPF_LINK_TYPE LinkType = 13 ) type MapType uint32 const ( - BPF_MAP_TYPE_UNSPEC MapType = 0 - BPF_MAP_TYPE_HASH MapType = 1 - BPF_MAP_TYPE_ARRAY MapType = 2 - BPF_MAP_TYPE_PROG_ARRAY MapType = 3 - BPF_MAP_TYPE_PERF_EVENT_ARRAY MapType = 4 - BPF_MAP_TYPE_PERCPU_HASH MapType = 5 - BPF_MAP_TYPE_PERCPU_ARRAY MapType = 6 - BPF_MAP_TYPE_STACK_TRACE MapType = 7 - BPF_MAP_TYPE_CGROUP_ARRAY MapType = 8 - BPF_MAP_TYPE_LRU_HASH MapType = 9 - BPF_MAP_TYPE_LRU_PERCPU_HASH MapType = 10 - BPF_MAP_TYPE_LPM_TRIE MapType = 11 - BPF_MAP_TYPE_ARRAY_OF_MAPS MapType = 12 - BPF_MAP_TYPE_HASH_OF_MAPS MapType = 13 - BPF_MAP_TYPE_DEVMAP MapType = 14 - BPF_MAP_TYPE_SOCKMAP MapType = 15 - BPF_MAP_TYPE_CPUMAP MapType = 16 - BPF_MAP_TYPE_XSKMAP MapType = 17 - BPF_MAP_TYPE_SOCKHASH MapType = 18 - BPF_MAP_TYPE_CGROUP_STORAGE MapType = 19 - BPF_MAP_TYPE_REUSEPORT_SOCKARRAY MapType = 20 - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE MapType = 21 - BPF_MAP_TYPE_QUEUE MapType = 22 - BPF_MAP_TYPE_STACK MapType = 23 - BPF_MAP_TYPE_SK_STORAGE MapType = 24 - BPF_MAP_TYPE_DEVMAP_HASH MapType = 25 - BPF_MAP_TYPE_STRUCT_OPS MapType = 26 - BPF_MAP_TYPE_RINGBUF MapType = 27 - BPF_MAP_TYPE_INODE_STORAGE MapType = 28 - BPF_MAP_TYPE_TASK_STORAGE MapType = 29 - BPF_MAP_TYPE_BLOOM_FILTER MapType = 30 - BPF_MAP_TYPE_USER_RINGBUF MapType = 31 + BPF_MAP_TYPE_UNSPEC MapType = 0 + BPF_MAP_TYPE_HASH MapType = 1 + BPF_MAP_TYPE_ARRAY MapType = 2 + BPF_MAP_TYPE_PROG_ARRAY MapType = 3 + BPF_MAP_TYPE_PERF_EVENT_ARRAY MapType = 4 + BPF_MAP_TYPE_PERCPU_HASH MapType = 5 + BPF_MAP_TYPE_PERCPU_ARRAY MapType = 6 + BPF_MAP_TYPE_STACK_TRACE MapType = 7 + BPF_MAP_TYPE_CGROUP_ARRAY MapType = 8 + BPF_MAP_TYPE_LRU_HASH MapType = 9 + BPF_MAP_TYPE_LRU_PERCPU_HASH MapType = 10 + 
BPF_MAP_TYPE_LPM_TRIE MapType = 11 + BPF_MAP_TYPE_ARRAY_OF_MAPS MapType = 12 + BPF_MAP_TYPE_HASH_OF_MAPS MapType = 13 + BPF_MAP_TYPE_DEVMAP MapType = 14 + BPF_MAP_TYPE_SOCKMAP MapType = 15 + BPF_MAP_TYPE_CPUMAP MapType = 16 + BPF_MAP_TYPE_XSKMAP MapType = 17 + BPF_MAP_TYPE_SOCKHASH MapType = 18 + BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED MapType = 19 + BPF_MAP_TYPE_CGROUP_STORAGE MapType = 19 + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY MapType = 20 + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE MapType = 21 + BPF_MAP_TYPE_QUEUE MapType = 22 + BPF_MAP_TYPE_STACK MapType = 23 + BPF_MAP_TYPE_SK_STORAGE MapType = 24 + BPF_MAP_TYPE_DEVMAP_HASH MapType = 25 + BPF_MAP_TYPE_STRUCT_OPS MapType = 26 + BPF_MAP_TYPE_RINGBUF MapType = 27 + BPF_MAP_TYPE_INODE_STORAGE MapType = 28 + BPF_MAP_TYPE_TASK_STORAGE MapType = 29 + BPF_MAP_TYPE_BLOOM_FILTER MapType = 30 + BPF_MAP_TYPE_USER_RINGBUF MapType = 31 + BPF_MAP_TYPE_CGRP_STORAGE MapType = 32 ) type ProgType uint32 @@ -416,6 +428,7 @@ const ( BPF_PROG_TYPE_LSM ProgType = 29 BPF_PROG_TYPE_SK_LOOKUP ProgType = 30 BPF_PROG_TYPE_SYSCALL ProgType = 31 + BPF_PROG_TYPE_NETFILTER ProgType = 32 ) type RetCode uint32 @@ -594,12 +607,12 @@ func BtfGetNextId(attr *BtfGetNextIdAttr) error { } type BtfLoadAttr struct { - Btf Pointer - BtfLogBuf Pointer - BtfSize uint32 - BtfLogSize uint32 - BtfLogLevel uint32 - _ [4]byte + Btf Pointer + BtfLogBuf Pointer + BtfSize uint32 + BtfLogSize uint32 + BtfLogLevel uint32 + BtfLogTrueSize uint32 } func BtfLoad(attr *BtfLoadAttr) (*FD, error) { @@ -639,7 +652,7 @@ type LinkCreateAttr struct { AttachType AttachType Flags uint32 TargetBtfId TypeID - _ [28]byte + _ [44]byte } func LinkCreate(attr *LinkCreateAttr) (*FD, error) { @@ -657,7 +670,7 @@ type LinkCreateIterAttr struct { Flags uint32 IterInfo Pointer IterInfoLen uint32 - _ [20]byte + _ [36]byte } func LinkCreateIter(attr *LinkCreateIterAttr) (*FD, error) { @@ -678,6 +691,7 @@ type LinkCreateKprobeMultiAttr struct { Syms Pointer Addrs Pointer Cookies Pointer + _ [16]byte } func LinkCreateKprobeMulti(attr *LinkCreateKprobeMultiAttr) (*FD, error) { @@ -694,7 +708,7 @@ type LinkCreatePerfEventAttr struct { AttachType AttachType Flags uint32 BpfCookie uint64 - _ [24]byte + _ [40]byte } func LinkCreatePerfEvent(attr *LinkCreatePerfEventAttr) (*FD, error) { @@ -713,7 +727,7 @@ type LinkCreateTracingAttr struct { TargetBtfId BTFID _ [4]byte Cookie uint64 - _ [16]byte + _ [32]byte } func LinkCreateTracing(attr *LinkCreateTracingAttr) (*FD, error) { @@ -920,6 +934,8 @@ type ObjGetAttr struct { Pathname Pointer BpfFd uint32 FileFlags uint32 + PathFd int32 + _ [4]byte } func ObjGet(attr *ObjGetAttr) (*FD, error) { @@ -945,6 +961,8 @@ type ObjPinAttr struct { Pathname Pointer BpfFd uint32 FileFlags uint32 + PathFd int32 + _ [4]byte } func ObjPin(attr *ObjPinAttr) error { @@ -953,11 +971,13 @@ func ObjPin(attr *ObjPinAttr) error { } type ProgAttachAttr struct { - TargetFd uint32 - AttachBpfFd uint32 - AttachType uint32 - AttachFlags uint32 - ReplaceBpfFd uint32 + TargetFd uint32 + AttachBpfFd uint32 + AttachType uint32 + AttachFlags uint32 + ReplaceBpfFd uint32 + RelativeFd uint32 + ExpectedRevision uint64 } func ProgAttach(attr *ProgAttachAttr) error { @@ -1033,7 +1053,7 @@ type ProgLoadAttr struct { FdArray Pointer CoreRelos Pointer CoreReloRecSize uint32 - _ [4]byte + LogTrueSize uint32 } func ProgLoad(attr *ProgLoadAttr) (*FD, error) { @@ -1052,7 +1072,10 @@ type ProgQueryAttr struct { ProgIds Pointer ProgCount uint32 _ [4]byte - ProgAttachFlags uint64 + ProgAttachFlags Pointer + LinkIds Pointer + 
LinkAttachFlags Pointer + Revision uint64 } func ProgQuery(attr *ProgQueryAttr) error { diff --git a/vendor/github.com/cilium/ebpf/linker.go b/vendor/github.com/cilium/ebpf/linker.go index b653b805e6..cf5b02ddf5 100644 --- a/vendor/github.com/cilium/ebpf/linker.go +++ b/vendor/github.com/cilium/ebpf/linker.go @@ -7,6 +7,8 @@ import ( "io" "math" + "golang.org/x/exp/slices" + "github.com/cilium/ebpf/asm" "github.com/cilium/ebpf/btf" "github.com/cilium/ebpf/internal" @@ -57,21 +59,33 @@ func splitSymbols(insns asm.Instructions) (map[string]asm.Instructions, error) { return nil, errors.New("insns is empty") } - if insns[0].Symbol() == "" { + currentSym := insns[0].Symbol() + if currentSym == "" { return nil, errors.New("insns must start with a Symbol") } - var name string + start := 0 progs := make(map[string]asm.Instructions) - for _, ins := range insns { - if sym := ins.Symbol(); sym != "" { - if progs[sym] != nil { - return nil, fmt.Errorf("insns contains duplicate Symbol %s", sym) - } - name = sym + for i, ins := range insns[1:] { + i := i + 1 + + sym := ins.Symbol() + if sym == "" { + continue } - progs[name] = append(progs[name], ins) + // New symbol, flush the old one out. + progs[currentSym] = slices.Clone(insns[start:i]) + + if progs[sym] != nil { + return nil, fmt.Errorf("insns contains duplicate Symbol %s", sym) + } + currentSym = sym + start = i + } + + if tail := insns[start:]; len(tail) > 0 { + progs[currentSym] = slices.Clone(tail) } return progs, nil diff --git a/vendor/github.com/cilium/ebpf/map.go b/vendor/github.com/cilium/ebpf/map.go index 7f6184850b..ce945ace08 100644 --- a/vendor/github.com/cilium/ebpf/map.go +++ b/vendor/github.com/cilium/ebpf/map.go @@ -9,6 +9,7 @@ import ( "os" "path/filepath" "reflect" + "strings" "time" "unsafe" @@ -190,26 +191,32 @@ func (ms *MapSpec) Compatible(m *Map) error { return err } - switch { - case m.typ != ms.Type: - return fmt.Errorf("expected type %v, got %v: %w", ms.Type, m.typ, ErrMapIncompatible) - - case m.keySize != ms.KeySize: - return fmt.Errorf("expected key size %v, got %v: %w", ms.KeySize, m.keySize, ErrMapIncompatible) - - case m.valueSize != ms.ValueSize: - return fmt.Errorf("expected value size %v, got %v: %w", ms.ValueSize, m.valueSize, ErrMapIncompatible) + diffs := []string{} + if m.typ != ms.Type { + diffs = append(diffs, fmt.Sprintf("Type: %s changed to %s", m.typ, ms.Type)) + } + if m.keySize != ms.KeySize { + diffs = append(diffs, fmt.Sprintf("KeySize: %d changed to %d", m.keySize, ms.KeySize)) + } + if m.valueSize != ms.ValueSize { + diffs = append(diffs, fmt.Sprintf("ValueSize: %d changed to %d", m.valueSize, ms.ValueSize)) + } + if m.maxEntries != ms.MaxEntries { + diffs = append(diffs, fmt.Sprintf("MaxEntries: %d changed to %d", m.maxEntries, ms.MaxEntries)) + } - case m.maxEntries != ms.MaxEntries: - return fmt.Errorf("expected max entries %v, got %v: %w", ms.MaxEntries, m.maxEntries, ErrMapIncompatible) + // BPF_F_RDONLY_PROG is set unconditionally for devmaps. Explicitly allow this + // mismatch. + if !((ms.Type == DevMap || ms.Type == DevMapHash) && m.flags^ms.Flags == unix.BPF_F_RDONLY_PROG) && + m.flags != ms.Flags { + diffs = append(diffs, fmt.Sprintf("Flags: %d changed to %d", m.flags, ms.Flags)) + } - // BPF_F_RDONLY_PROG is set unconditionally for devmaps. Explicitly allow - // this mismatch. 
- case !((ms.Type == DevMap || ms.Type == DevMapHash) && m.flags^ms.Flags == unix.BPF_F_RDONLY_PROG) && - m.flags != ms.Flags: - return fmt.Errorf("expected flags %v, got %v: %w", ms.Flags, m.flags, ErrMapIncompatible) + if len(diffs) == 0 { + return nil } - return nil + + return fmt.Errorf("%s: %w", strings.Join(diffs, ", "), ErrMapIncompatible) } // Map represents a Map file descriptor. diff --git a/vendor/github.com/cilium/ebpf/rlimit/rlimit.go b/vendor/github.com/cilium/ebpf/rlimit/rlimit.go deleted file mode 100644 index 2a6973744f..0000000000 --- a/vendor/github.com/cilium/ebpf/rlimit/rlimit.go +++ /dev/null @@ -1,123 +0,0 @@ -// Package rlimit allows raising RLIMIT_MEMLOCK if necessary for the use of BPF. -package rlimit - -import ( - "errors" - "fmt" - "sync" - - "github.com/cilium/ebpf/internal" - "github.com/cilium/ebpf/internal/sys" - "github.com/cilium/ebpf/internal/unix" -) - -var ( - unsupportedMemcgAccounting = &internal.UnsupportedFeatureError{ - MinimumVersion: internal.Version{5, 11, 0}, - Name: "memcg-based accounting for BPF memory", - } - haveMemcgAccounting error - - rlimitMu sync.Mutex -) - -func init() { - // We have to run this feature test at init, since it relies on changing - // RLIMIT_MEMLOCK. Doing so is not safe in a concurrent program. Instead, - // we rely on the initialization order guaranteed by the Go runtime to - // execute the test in a safe environment: - // - // the invocation of init functions happens in a single goroutine, - // sequentially, one package at a time. - // - // This is also the reason why RemoveMemlock is in its own package: - // we only want to run the initializer if RemoveMemlock is called - // from somewhere. - haveMemcgAccounting = detectMemcgAccounting() -} - -func detectMemcgAccounting() error { - // Retrieve the original limit to prevent lowering Max, since - // doing so is a permanent operation when running unprivileged. - var oldLimit unix.Rlimit - if err := unix.Prlimit(0, unix.RLIMIT_MEMLOCK, nil, &oldLimit); err != nil { - return fmt.Errorf("getting original memlock rlimit: %s", err) - } - - // Drop the current limit to zero, maintaining the old Max value. - // This is always permitted by the kernel for unprivileged users. - // Retrieve a new copy of the old limit tuple to minimize the chances - // of failing the restore operation below. - zeroLimit := unix.Rlimit{Cur: 0, Max: oldLimit.Max} - if err := unix.Prlimit(0, unix.RLIMIT_MEMLOCK, &zeroLimit, &oldLimit); err != nil { - return fmt.Errorf("lowering memlock rlimit: %s", err) - } - - attr := sys.MapCreateAttr{ - MapType: 2, /* Array */ - KeySize: 4, - ValueSize: 4, - MaxEntries: 1, - } - - // Creating a map allocates shared (and locked) memory that counts against - // the rlimit on pre-5.11 kernels, but against the memory cgroup budget on - // kernels 5.11 and over. If this call succeeds with the process' memlock - // rlimit set to 0, we can reasonably assume memcg accounting is supported. - fd, mapErr := sys.MapCreate(&attr) - - // Restore old limits regardless of what happened. - if err := unix.Prlimit(0, unix.RLIMIT_MEMLOCK, &oldLimit, nil); err != nil { - return fmt.Errorf("restoring old memlock rlimit: %s", err) - } - - // Map creation successful, memcg accounting supported. - if mapErr == nil { - fd.Close() - return nil - } - - // EPERM shows up when map creation would exceed the memory budget. - if errors.Is(mapErr, unix.EPERM) { - return unsupportedMemcgAccounting - } - - // This shouldn't happen really. 
- return fmt.Errorf("unexpected error detecting memory cgroup accounting: %s", mapErr) -} - -// RemoveMemlock removes the limit on the amount of memory the current -// process can lock into RAM, if necessary. -// -// This is not required to load eBPF resources on kernel versions 5.11+ -// due to the introduction of cgroup-based memory accounting. On such kernels -// the function is a no-op. -// -// Since the function may change global per-process limits it should be invoked -// at program start up, in main() or init(). -// -// This function exists as a convenience and should only be used when -// permanently raising RLIMIT_MEMLOCK to infinite is appropriate. Consider -// invoking prlimit(2) directly with a more reasonable limit if desired. -// -// Requires CAP_SYS_RESOURCE on kernels < 5.11. -func RemoveMemlock() error { - if haveMemcgAccounting == nil { - return nil - } - - if !errors.Is(haveMemcgAccounting, unsupportedMemcgAccounting) { - return haveMemcgAccounting - } - - rlimitMu.Lock() - defer rlimitMu.Unlock() - - // pid 0 affects the current process. Requires CAP_SYS_RESOURCE. - newLimit := unix.Rlimit{Cur: unix.RLIM_INFINITY, Max: unix.RLIM_INFINITY} - if err := unix.Prlimit(0, unix.RLIMIT_MEMLOCK, &newLimit, nil); err != nil { - return fmt.Errorf("failed to set memlock rlimit: %w", err) - } - - return nil -} diff --git a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md index 352018e703..5edd5a7ca9 100644 --- a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md +++ b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md @@ -1,11 +1,15 @@ # Change history of go-restful -## [v3.10.2] - 2023-03-09 +## [v3.11.0] - 2023-08-19 + +- restored behavior as <= v3.9.0 with option to change path strategy using TrimRightSlashEnabled. + +## [v3.10.2] - 2023-03-09 - DO NOT USE - introduced MergePathStrategy to be able to revert behaviour of path concatenation to 3.9.0 see comment in Readme how to customize this behaviour. -## [v3.10.1] - 2022-11-19 +## [v3.10.1] - 2022-11-19 - DO NOT USE - fix broken 3.10.0 by using path package for joining paths diff --git a/vendor/github.com/emicklei/go-restful/v3/README.md b/vendor/github.com/emicklei/go-restful/v3/README.md index 85da90128e..e3e30080ec 100644 --- a/vendor/github.com/emicklei/go-restful/v3/README.md +++ b/vendor/github.com/emicklei/go-restful/v3/README.md @@ -79,7 +79,7 @@ func (u UserResource) findUser(request *restful.Request, response *restful.Respo - Content encoding (gzip,deflate) of request and response payloads - Automatic responses on OPTIONS (using a filter) - Automatic CORS request handling (using a filter) -- API declaration for Swagger UI ([go-restful-openapi](https://github.com/emicklei/go-restful-openapi), see [go-restful-swagger12](https://github.com/emicklei/go-restful-swagger12)) +- API declaration for Swagger UI ([go-restful-openapi](https://github.com/emicklei/go-restful-openapi)) - Panic recovery to produce HTTP 500, customizable using RecoverHandler(...) - Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...) - Configurable (trace) logging @@ -96,10 +96,7 @@ There are several hooks to customize the behavior of the go-restful package. - Compression - Encoders for other serializers - Use [jsoniter](https://github.com/json-iterator/go) by building this package using a build tag, e.g. 
`go build -tags=jsoniter .` -- Use the variable `MergePathStrategy` to change the behaviour of composing the Route path given a root path and a local route path - - versions >= 3.10.1 has set the value to `PathJoinStrategy` that fixes a reported [security issue](https://github.com/advisories/GHSA-r48q-9g5r-8q2h) but may cause your services not to work correctly anymore. - - versions <= 3.9 had the behaviour that can be restored in newer versions by setting the value to `TrimSlashStrategy`. - - you can set value to a custom implementation (must implement MergePathStrategyFunc) +- Use the package variable `TrimRightSlashEnabled` (default true) to control the behavior of matching routes that end with a slash `/` ## Resources @@ -112,4 +109,4 @@ There are several hooks to customize the behavior of the go-restful package. Type ```git shortlog -s``` for a full list of contributors. -© 2012 - 2022, http://ernestmicklei.com. MIT License. Contributions are welcome. +© 2012 - 2023, http://ernestmicklei.com. MIT License. Contributions are welcome. diff --git a/vendor/github.com/emicklei/go-restful/v3/route.go b/vendor/github.com/emicklei/go-restful/v3/route.go index ea05b3da88..306c44be77 100644 --- a/vendor/github.com/emicklei/go-restful/v3/route.go +++ b/vendor/github.com/emicklei/go-restful/v3/route.go @@ -40,7 +40,8 @@ type Route struct { ParameterDocs []*Parameter ResponseErrors map[int]ResponseError DefaultResponse *ResponseError - ReadSample, WriteSample interface{} // structs that model an example request or response payload + ReadSample, WriteSample interface{} // structs that model an example request or response payload + WriteSamples []interface{} // if more than one return types is possible (oneof) then this will contain multiple values // Extra information used to store custom information about the route. Metadata map[string]interface{} @@ -164,7 +165,13 @@ func tokenizePath(path string) []string { if "/" == path { return nil } - return strings.Split(strings.TrimLeft(path, "/"), "/") + if TrimRightSlashEnabled { + // 3.9.0 + return strings.Split(strings.Trim(path, "/"), "/") + } else { + // 3.10.2 + return strings.Split(strings.TrimLeft(path, "/"), "/") + } } // for debugging @@ -177,4 +184,8 @@ func (r *Route) EnableContentEncoding(enabled bool) { r.contentEncodingEnabled = &enabled } -var TrimRightSlashEnabled = false +// TrimRightSlashEnabled controls whether +// - path on route building is using path.Join +// - the path of the incoming request is trimmed of its slash suffux. 
+// Value of true matches the behavior of <= 3.9.0 +var TrimRightSlashEnabled = true diff --git a/vendor/github.com/emicklei/go-restful/v3/route_builder.go b/vendor/github.com/emicklei/go-restful/v3/route_builder.go index 827f471de0..75168c12e1 100644 --- a/vendor/github.com/emicklei/go-restful/v3/route_builder.go +++ b/vendor/github.com/emicklei/go-restful/v3/route_builder.go @@ -31,17 +31,18 @@ type RouteBuilder struct { typeNameHandleFunc TypeNameHandleFunction // required // documentation - doc string - notes string - operation string - readSample, writeSample interface{} - parameters []*Parameter - errorMap map[int]ResponseError - defaultResponse *ResponseError - metadata map[string]interface{} - extensions map[string]interface{} - deprecated bool - contentEncodingEnabled *bool + doc string + notes string + operation string + readSample interface{} + writeSamples []interface{} + parameters []*Parameter + errorMap map[int]ResponseError + defaultResponse *ResponseError + metadata map[string]interface{} + extensions map[string]interface{} + deprecated bool + contentEncodingEnabled *bool } // Do evaluates each argument with the RouteBuilder itself. @@ -135,9 +136,9 @@ func (b RouteBuilder) ParameterNamed(name string) (p *Parameter) { return p } -// Writes tells what resource type will be written as the response payload. Optional. -func (b *RouteBuilder) Writes(sample interface{}) *RouteBuilder { - b.writeSample = sample +// Writes tells which one of the resource types will be written as the response payload. Optional. +func (b *RouteBuilder) Writes(samples ...interface{}) *RouteBuilder { + b.writeSamples = samples // oneof return b } @@ -342,39 +343,29 @@ func (b *RouteBuilder) Build() Route { ResponseErrors: b.errorMap, DefaultResponse: b.defaultResponse, ReadSample: b.readSample, - WriteSample: b.writeSample, + WriteSamples: b.writeSamples, Metadata: b.metadata, Deprecated: b.deprecated, contentEncodingEnabled: b.contentEncodingEnabled, allowedMethodsWithoutContentType: b.allowedMethodsWithoutContentType, } + // set WriteSample if one specified + if len(b.writeSamples) == 1 { + route.WriteSample = b.writeSamples[0] + } route.Extensions = b.extensions route.postBuild() return route } -type MergePathStrategyFunc func(rootPath, routePath string) string - -var ( - // behavior >= 3.10 - PathJoinStrategy = func(rootPath, routePath string) string { - return path.Join(rootPath, routePath) - } +// merge two paths using the current (package global) merge path strategy. +func concatPath(rootPath, routePath string) string { - // behavior <= 3.9 - TrimSlashStrategy = func(rootPath, routePath string) string { + if TrimRightSlashEnabled { return strings.TrimRight(rootPath, "/") + "/" + strings.TrimLeft(routePath, "/") + } else { + return path.Join(rootPath, routePath) } - - // MergePathStrategy is the active strategy for merging a Route path when building the routing of all WebServices. - // The value is set to PathJoinStrategy - // PathJoinStrategy is a strategy that is more strict [Security - PRISMA-2022-0227] - MergePathStrategy = PathJoinStrategy -) - -// merge two paths using the current (package global) merge path strategy. 
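The go-restful hunks above replace the `MergePathStrategy` hook with the package variable `TrimRightSlashEnabled`, which defaults to `true` (the <= v3.9.0 behavior). A minimal sketch of opting back into the stricter v3.10 path joining, assuming the usual `restful` import alias:

```go
package main

import restful "github.com/emicklei/go-restful/v3"

func main() {
	// Opt back into the stricter v3.10 behavior (path.Join-based concatenation
	// and no trimming of trailing slashes).
	restful.TrimRightSlashEnabled = false

	ws := new(restful.WebService)
	ws.Path("/users/")
	restful.Add(ws)
}
```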
-func concatPath(rootPath, routePath string) string { - return MergePathStrategy(rootPath, routePath) } var anonymousFuncCount int32 diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md index ab59311813..a8c29bfbd5 100644 --- a/vendor/github.com/go-logr/logr/README.md +++ b/vendor/github.com/go-logr/logr/README.md @@ -1,6 +1,7 @@ # A minimal logging API for Go [![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr) +[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/go-logr/logr/badge)](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr) logr offers an(other) opinion on how Go programs and libraries can do logging without becoming coupled to a particular logging implementation. This is not @@ -73,6 +74,29 @@ received: If the Go standard library had defined an interface for logging, this project probably would not be needed. Alas, here we are. +When the Go developers started developing such an interface with +[slog](https://github.com/golang/go/issues/56345), they adopted some of the +logr design but also left out some parts and changed others: + +| Feature | logr | slog | +|---------|------|------| +| High-level API | `Logger` (passed by value) | `Logger` (passed by [pointer](https://github.com/golang/go/issues/59126)) | +| Low-level API | `LogSink` | `Handler` | +| Stack unwinding | done by `LogSink` | done by `Logger` | +| Skipping helper functions | `WithCallDepth`, `WithCallStackHelper` | [not supported by Logger](https://github.com/golang/go/issues/59145) | +| Generating a value for logging on demand | `Marshaler` | `LogValuer` | +| Log levels | >= 0, higher meaning "less important" | positive and negative, with 0 for "info" and higher meaning "more important" | +| Error log entries | always logged, don't have a verbosity level | normal log entries with level >= `LevelError` | +| Passing logger via context | `NewContext`, `FromContext` | no API | +| Adding a name to a logger | `WithName` | no API | +| Modify verbosity of log entries in a call chain | `V` | no API | +| Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` | + +The high-level slog API is explicitly meant to be one of many different APIs +that can be layered on top of a shared `slog.Handler`. logr is one such +alternative API, with [interoperability](#slog-interoperability) provided by the [`slogr`](slogr) +package. + ### Inspiration Before you consider this package, please read [this blog post by the @@ -118,6 +142,91 @@ There are implementations for the following logging libraries: - **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0) - **bytes.Buffer** (writing to a buffer): [bufrlogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing) +## slog interoperability + +Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler` +and using the `slog.Logger` API with a `logr.LogSink`. [slogr](./slogr) provides `NewLogr` and +`NewSlogHandler` API calls to convert between a `logr.Logger` and a `slog.Handler`. +As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level +slog API. `slogr` itself leaves that to the caller. 
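A short sketch of the two conversion directions the paragraph above describes, assuming Go 1.21+ for `log/slog` and the `slogr` package that ships with logr v1.3.0 (`NewLogr`, `NewSlogHandler`); the JSON handler is an arbitrary choice:

```go
package main

import (
	"log/slog"
	"os"

	"github.com/go-logr/logr/slogr"
)

func main() {
	// slog.Handler -> logr.Logger
	handler := slog.NewJSONHandler(os.Stdout, nil)
	logger := slogr.NewLogr(handler)
	logger.Info("hello from logr", "answer", 42)

	// logr.Logger -> slog.Handler, wrapped back into the high-level slog API
	slogger := slog.New(slogr.NewSlogHandler(logger))
	slogger.Info("hello from slog", "answer", 42)
}
```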
+ +## Using a `logr.Sink` as backend for slog + +Ideally, a logr sink implementation should support both logr and slog by +implementing both the normal logr interface(s) and `slogr.SlogSink`. Because +of a conflict in the parameters of the common `Enabled` method, it is [not +possible to implement both slog.Handler and logr.Sink in the same +type](https://github.com/golang/go/issues/59110). + +If both are supported, log calls can go from the high-level APIs to the backend +without the need to convert parameters. `NewLogr` and `NewSlogHandler` can +convert back and forth without adding additional wrappers, with one exception: +when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then +`NewSlogHandler` has to use a wrapper which adjusts the verbosity for future +log calls. + +Such an implementation should also support values that implement specific +interfaces from both packages for logging (`logr.Marshaler`, `slog.LogValuer`, +`slog.GroupValue`). logr does not convert those. + +Not supporting slog has several drawbacks: +- Recording source code locations works correctly if the handler gets called + through `slog.Logger`, but may be wrong in other cases. That's because a + `logr.Sink` does its own stack unwinding instead of using the program counter + provided by the high-level API. +- slog levels <= 0 can be mapped to logr levels by negating the level without a + loss of information. But all slog levels > 0 (e.g. `slog.LevelWarning` as + used by `slog.Logger.Warn`) must be mapped to 0 before calling the sink + because logr does not support "more important than info" levels. +- The slog group concept is supported by prefixing each key in a key/value + pair with the group names, separated by a dot. For structured output like + JSON it would be better to group the key/value pairs inside an object. +- Special slog values and interfaces don't work as expected. +- The overhead is likely to be higher. + +These drawbacks are severe enough that applications using a mixture of slog and +logr should switch to a different backend. + +## Using a `slog.Handler` as backend for logr + +Using a plain `slog.Handler` without support for logr works better than the +other direction: +- All logr verbosity levels can be mapped 1:1 to their corresponding slog level + by negating them. +- Stack unwinding is done by the `slogr.SlogSink` and the resulting program + counter is passed to the `slog.Handler`. +- Names added via `Logger.WithName` are gathered and recorded in an additional + attribute with `logger` as key and the names separated by slash as value. +- `Logger.Error` is turned into a log record with `slog.LevelError` as level + and an additional attribute with `err` as key, if an error was provided. + +The main drawback is that `logr.Marshaler` will not be supported. Types should +ideally support both `logr.Marshaler` and `slog.Valuer`. If compatibility +with logr implementations without slog support is not important, then +`slog.Valuer` is sufficient. + +## Context support for slog + +Storing a logger in a `context.Context` is not supported by +slog. 
`logr.NewContext` and `logr.FromContext` can be used with slog like this +to fill this gap: + + func HandlerFromContext(ctx context.Context) slog.Handler { + logger, err := logr.FromContext(ctx) + if err == nil { + return slogr.NewSlogHandler(logger) + } + return slog.Default().Handler() + } + + func ContextWithHandler(ctx context.Context, handler slog.Handler) context.Context { + return logr.NewContext(ctx, slogr.NewLogr(handler)) + } + +The downside is that storing and retrieving a `slog.Handler` needs more +allocations compared to using a `logr.Logger`. Therefore the recommendation is +to use the `logr.Logger` API in code which uses contextual logging. + ## FAQ ### Conceptual @@ -241,7 +350,9 @@ Otherwise, you can start out with `0` as "you always want to see this", Then gradually choose levels in between as you need them, working your way down from 10 (for debug and trace style logs) and up from 1 (for chattier -info-type logs.) +info-type logs). For reference, slog pre-defines -4 for debug logs +(corresponds to 4 in logr), which matches what is +[recommended for Kubernetes](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md#what-method-to-use). #### How do I choose my keys? diff --git a/vendor/github.com/go-logr/logr/SECURITY.md b/vendor/github.com/go-logr/logr/SECURITY.md new file mode 100644 index 0000000000..1ca756fc7b --- /dev/null +++ b/vendor/github.com/go-logr/logr/SECURITY.md @@ -0,0 +1,18 @@ +# Security Policy + +If you have discovered a security vulnerability in this project, please report it +privately. **Do not disclose it as a public issue.** This gives us time to work with you +to fix the issue before public exposure, reducing the chance that the exploit will be +used before a patch is released. + +You may submit the report in the following ways: + +- send an email to go-logr-security@googlegroups.com +- send us a [private vulnerability report](https://github.com/go-logr/logr/security/advisories/new) + +Please provide the following information in your report: + +- A description of the vulnerability and its impact +- How to reproduce the issue + +We ask that you give us 90 days to work on a fix before public exposure. diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go index e52f0cd01e..12e5807cc5 100644 --- a/vendor/github.com/go-logr/logr/funcr/funcr.go +++ b/vendor/github.com/go-logr/logr/funcr/funcr.go @@ -116,17 +116,17 @@ type Options struct { // Equivalent hooks are offered for key-value pairs saved via // logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and // for user-provided pairs (see RenderArgsHook). - RenderBuiltinsHook func(kvList []interface{}) []interface{} + RenderBuiltinsHook func(kvList []any) []any // RenderValuesHook is the same as RenderBuiltinsHook, except that it is // only called for key-value pairs saved via logr.Logger.WithValues. See // RenderBuiltinsHook for more details. - RenderValuesHook func(kvList []interface{}) []interface{} + RenderValuesHook func(kvList []any) []any // RenderArgsHook is the same as RenderBuiltinsHook, except that it is only // called for key-value pairs passed directly to Info and Error. See // RenderBuiltinsHook for more details. - RenderArgsHook func(kvList []interface{}) []interface{} + RenderArgsHook func(kvList []any) []any // MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct // that contains a struct, etc.) it may log. 
Every time it finds a struct, @@ -163,7 +163,7 @@ func (l fnlogger) WithName(name string) logr.LogSink { return &l } -func (l fnlogger) WithValues(kvList ...interface{}) logr.LogSink { +func (l fnlogger) WithValues(kvList ...any) logr.LogSink { l.Formatter.AddValues(kvList) return &l } @@ -173,12 +173,12 @@ func (l fnlogger) WithCallDepth(depth int) logr.LogSink { return &l } -func (l fnlogger) Info(level int, msg string, kvList ...interface{}) { +func (l fnlogger) Info(level int, msg string, kvList ...any) { prefix, args := l.FormatInfo(level, msg, kvList) l.write(prefix, args) } -func (l fnlogger) Error(err error, msg string, kvList ...interface{}) { +func (l fnlogger) Error(err error, msg string, kvList ...any) { prefix, args := l.FormatError(err, msg, kvList) l.write(prefix, args) } @@ -229,7 +229,7 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter { type Formatter struct { outputFormat outputFormat prefix string - values []interface{} + values []any valuesStr string depth int opts *Options @@ -246,10 +246,10 @@ const ( ) // PseudoStruct is a list of key-value pairs that gets logged as a struct. -type PseudoStruct []interface{} +type PseudoStruct []any // render produces a log line, ready to use. -func (f Formatter) render(builtins, args []interface{}) string { +func (f Formatter) render(builtins, args []any) string { // Empirically bytes.Buffer is faster than strings.Builder for this. buf := bytes.NewBuffer(make([]byte, 0, 1024)) if f.outputFormat == outputJSON { @@ -292,7 +292,7 @@ func (f Formatter) render(builtins, args []interface{}) string { // This function returns a potentially modified version of kvList, which // ensures that there is a value for every key (adding a value if needed) and // that each key is a string (substituting a key if needed). -func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing bool, escapeKeys bool) []interface{} { +func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, escapeKeys bool) []any { // This logic overlaps with sanitize() but saves one type-cast per key, // which can be measurable. if len(kvList)%2 != 0 { @@ -334,7 +334,7 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing b return kvList } -func (f Formatter) pretty(value interface{}) string { +func (f Formatter) pretty(value any) string { return f.prettyWithFlags(value, 0, 0) } @@ -343,7 +343,7 @@ const ( ) // TODO: This is not fast. Most of the overhead goes here. -func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) string { +func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string { if depth > f.opts.MaxLogDepth { return `""` } @@ -614,7 +614,7 @@ func isEmpty(v reflect.Value) bool { return false } -func invokeMarshaler(m logr.Marshaler) (ret interface{}) { +func invokeMarshaler(m logr.Marshaler) (ret any) { defer func() { if r := recover(); r != nil { ret = fmt.Sprintf("", r) @@ -675,12 +675,12 @@ func (f Formatter) caller() Caller { const noValue = "" -func (f Formatter) nonStringKey(v interface{}) string { +func (f Formatter) nonStringKey(v any) string { return fmt.Sprintf("", f.snippet(v)) } // snippet produces a short snippet string of an arbitrary value. 
-func (f Formatter) snippet(v interface{}) string { +func (f Formatter) snippet(v any) string { const snipLen = 16 snip := f.pretty(v) @@ -693,7 +693,7 @@ func (f Formatter) snippet(v interface{}) string { // sanitize ensures that a list of key-value pairs has a value for every key // (adding a value if needed) and that each key is a string (substituting a key // if needed). -func (f Formatter) sanitize(kvList []interface{}) []interface{} { +func (f Formatter) sanitize(kvList []any) []any { if len(kvList)%2 != 0 { kvList = append(kvList, noValue) } @@ -727,8 +727,8 @@ func (f Formatter) GetDepth() int { // FormatInfo renders an Info log message into strings. The prefix will be // empty when no names were set (via AddNames), or when the output is // configured for JSON. -func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (prefix, argsStr string) { - args := make([]interface{}, 0, 64) // using a constant here impacts perf +func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, argsStr string) { + args := make([]any, 0, 64) // using a constant here impacts perf prefix = f.prefix if f.outputFormat == outputJSON { args = append(args, "logger", prefix) @@ -745,10 +745,10 @@ func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (pref } // FormatError renders an Error log message into strings. The prefix will be -// empty when no names were set (via AddNames), or when the output is +// empty when no names were set (via AddNames), or when the output is // configured for JSON. -func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (prefix, argsStr string) { - args := make([]interface{}, 0, 64) // using a constant here impacts perf +func (f Formatter) FormatError(err error, msg string, kvList []any) (prefix, argsStr string) { + args := make([]any, 0, 64) // using a constant here impacts perf prefix = f.prefix if f.outputFormat == outputJSON { args = append(args, "logger", prefix) @@ -761,12 +761,12 @@ func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (pre args = append(args, "caller", f.caller()) } args = append(args, "msg", msg) - var loggableErr interface{} + var loggableErr any if err != nil { loggableErr = err.Error() } args = append(args, "error", loggableErr) - return f.prefix, f.render(args, kvList) + return prefix, f.render(args, kvList) } // AddName appends the specified name. funcr uses '/' characters to separate @@ -781,7 +781,7 @@ func (f *Formatter) AddName(name string) { // AddValues adds key-value pairs to the set of saved values to be logged with // each log line. -func (f *Formatter) AddValues(kvList []interface{}) { +func (f *Formatter) AddValues(kvList []any) { // Three slice args forces a copy. n := len(f.values) f.values = append(f.values[:n:n], kvList...) diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go index e027aea3fd..2a5075a180 100644 --- a/vendor/github.com/go-logr/logr/logr.go +++ b/vendor/github.com/go-logr/logr/logr.go @@ -127,9 +127,9 @@ limitations under the License. // such a value can call its methods without having to check whether the // instance is ready for use. // -// Calling methods with the null logger (Logger{}) as instance will crash -// because it has no LogSink. Therefore this null logger should never be passed -// around. For cases where passing a logger is optional, a pointer to Logger +// The zero logger (= Logger{}) is identical to Discard() and discards all log +// entries. 
Code that receives a Logger by value can simply call it, the methods +// will never crash. For cases where passing a logger is optional, a pointer to Logger // should be used. // // # Key Naming Conventions @@ -258,6 +258,12 @@ type Logger struct { // Enabled tests whether this Logger is enabled. For example, commandline // flags might be used to set the logging verbosity and disable some info logs. func (l Logger) Enabled() bool { + // Some implementations of LogSink look at the caller in Enabled (e.g. + // different verbosity levels per package or file), but we only pass one + // CallDepth in (via Init). This means that all calls from Logger to the + // LogSink's Enabled, Info, and Error methods must have the same number of + // frames. In other words, Logger methods can't call other Logger methods + // which call these LogSink methods unless we do it the same in all paths. return l.sink != nil && l.sink.Enabled(l.level) } @@ -267,11 +273,11 @@ func (l Logger) Enabled() bool { // line. The key/value pairs can then be used to add additional variable // information. The key/value pairs must alternate string keys and arbitrary // values. -func (l Logger) Info(msg string, keysAndValues ...interface{}) { +func (l Logger) Info(msg string, keysAndValues ...any) { if l.sink == nil { return } - if l.Enabled() { + if l.sink.Enabled(l.level) { // see comment in Enabled if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { withHelper.GetCallStackHelper()() } @@ -289,7 +295,7 @@ func (l Logger) Info(msg string, keysAndValues ...interface{}) { // while the err argument should be used to attach the actual error that // triggered this log line, if present. The err parameter is optional // and nil may be passed instead of an error instance. -func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) { +func (l Logger) Error(err error, msg string, keysAndValues ...any) { if l.sink == nil { return } @@ -314,9 +320,16 @@ func (l Logger) V(level int) Logger { return l } +// GetV returns the verbosity level of the logger. If the logger's LogSink is +// nil as in the Discard logger, this will always return 0. +func (l Logger) GetV() int { + // 0 if l.sink nil because of the if check in V above. + return l.level +} + // WithValues returns a new Logger instance with additional key/value pairs. // See Info for documentation on how key/value pairs work. -func (l Logger) WithValues(keysAndValues ...interface{}) Logger { +func (l Logger) WithValues(keysAndValues ...any) Logger { if l.sink == nil { return l } @@ -467,15 +480,15 @@ type LogSink interface { // The level argument is provided for optional logging. This method will // only be called when Enabled(level) is true. See Logger.Info for more // details. - Info(level int, msg string, keysAndValues ...interface{}) + Info(level int, msg string, keysAndValues ...any) // Error logs an error, with the given message and key/value pairs as // context. See Logger.Error for more details. - Error(err error, msg string, keysAndValues ...interface{}) + Error(err error, msg string, keysAndValues ...any) // WithValues returns a new LogSink with additional key/value pairs. See // Logger.WithValues for more details. - WithValues(keysAndValues ...interface{}) LogSink + WithValues(keysAndValues ...any) LogSink // WithName returns a new LogSink with the specified name appended. See // Logger.WithName for more details. @@ -546,5 +559,5 @@ type Marshaler interface { // with exported fields // // It may return any value of any type. 
- MarshalLog() interface{} + MarshalLog() any } diff --git a/vendor/github.com/go-logr/logr/slogr/sloghandler.go b/vendor/github.com/go-logr/logr/slogr/sloghandler.go new file mode 100644 index 0000000000..ec6725ce2c --- /dev/null +++ b/vendor/github.com/go-logr/logr/slogr/sloghandler.go @@ -0,0 +1,168 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package slogr + +import ( + "context" + "log/slog" + + "github.com/go-logr/logr" +) + +type slogHandler struct { + // May be nil, in which case all logs get discarded. + sink logr.LogSink + // Non-nil if sink is non-nil and implements SlogSink. + slogSink SlogSink + + // groupPrefix collects values from WithGroup calls. It gets added as + // prefix to value keys when handling a log record. + groupPrefix string + + // levelBias can be set when constructing the handler to influence the + // slog.Level of log records. A positive levelBias reduces the + // slog.Level value. slog has no API to influence this value after the + // handler got created, so it can only be set indirectly through + // Logger.V. + levelBias slog.Level +} + +var _ slog.Handler = &slogHandler{} + +// groupSeparator is used to concatenate WithGroup names and attribute keys. +const groupSeparator = "." + +// GetLevel is used for black box unit testing. +func (l *slogHandler) GetLevel() slog.Level { + return l.levelBias +} + +func (l *slogHandler) Enabled(ctx context.Context, level slog.Level) bool { + return l.sink != nil && (level >= slog.LevelError || l.sink.Enabled(l.levelFromSlog(level))) +} + +func (l *slogHandler) Handle(ctx context.Context, record slog.Record) error { + if l.slogSink != nil { + // Only adjust verbosity level of log entries < slog.LevelError. + if record.Level < slog.LevelError { + record.Level -= l.levelBias + } + return l.slogSink.Handle(ctx, record) + } + + // No need to check for nil sink here because Handle will only be called + // when Enabled returned true. + + kvList := make([]any, 0, 2*record.NumAttrs()) + record.Attrs(func(attr slog.Attr) bool { + if attr.Key != "" { + kvList = append(kvList, l.addGroupPrefix(attr.Key), attr.Value.Resolve().Any()) + } + return true + }) + if record.Level >= slog.LevelError { + l.sinkWithCallDepth().Error(nil, record.Message, kvList...) + } else { + level := l.levelFromSlog(record.Level) + l.sinkWithCallDepth().Info(level, record.Message, kvList...) + } + return nil +} + +// sinkWithCallDepth adjusts the stack unwinding so that when Error or Info +// are called by Handle, code in slog gets skipped. +// +// This offset currently (Go 1.21.0) works for calls through +// slog.New(NewSlogHandler(...)). There's no guarantee that the call +// chain won't change. Wrapping the handler will also break unwinding. It's +// still better than not adjusting at all.... +// +// This cannot be done when constructing the handler because NewLogr needs +// access to the original sink without this adjustment. 
A second copy would +// work, but then WithAttrs would have to be called for both of them. +func (l *slogHandler) sinkWithCallDepth() logr.LogSink { + if sink, ok := l.sink.(logr.CallDepthLogSink); ok { + return sink.WithCallDepth(2) + } + return l.sink +} + +func (l *slogHandler) WithAttrs(attrs []slog.Attr) slog.Handler { + if l.sink == nil || len(attrs) == 0 { + return l + } + + copy := *l + if l.slogSink != nil { + copy.slogSink = l.slogSink.WithAttrs(attrs) + copy.sink = copy.slogSink + } else { + kvList := make([]any, 0, 2*len(attrs)) + for _, attr := range attrs { + if attr.Key != "" { + kvList = append(kvList, l.addGroupPrefix(attr.Key), attr.Value.Resolve().Any()) + } + } + copy.sink = l.sink.WithValues(kvList...) + } + return &copy +} + +func (l *slogHandler) WithGroup(name string) slog.Handler { + if l.sink == nil { + return l + } + copy := *l + if l.slogSink != nil { + copy.slogSink = l.slogSink.WithGroup(name) + copy.sink = l.slogSink + } else { + copy.groupPrefix = copy.addGroupPrefix(name) + } + return &copy +} + +func (l *slogHandler) addGroupPrefix(name string) string { + if l.groupPrefix == "" { + return name + } + return l.groupPrefix + groupSeparator + name +} + +// levelFromSlog adjusts the level by the logger's verbosity and negates it. +// It ensures that the result is >= 0. This is necessary because the result is +// passed to a logr.LogSink and that API did not historically document whether +// levels could be negative or what that meant. +// +// Some example usage: +// logrV0 := getMyLogger() +// logrV2 := logrV0.V(2) +// slogV2 := slog.New(slogr.NewSlogHandler(logrV2)) +// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6) +// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2) +// slogv2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0) +func (l *slogHandler) levelFromSlog(level slog.Level) int { + result := -level + result += l.levelBias // in case the original logr.Logger had a V level + if result < 0 { + result = 0 // because logr.LogSink doesn't expect negative V levels + } + return int(result) +} diff --git a/vendor/github.com/go-logr/logr/slogr/slogr.go b/vendor/github.com/go-logr/logr/slogr/slogr.go new file mode 100644 index 0000000000..eb519ae23f --- /dev/null +++ b/vendor/github.com/go-logr/logr/slogr/slogr.go @@ -0,0 +1,108 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package slogr enables usage of a slog.Handler with logr.Logger as front-end +// API and of a logr.LogSink through the slog.Handler and thus slog.Logger +// APIs. +// +// See the README in the top-level [./logr] package for a discussion of +// interoperability. +package slogr + +import ( + "context" + "log/slog" + + "github.com/go-logr/logr" +) + +// NewLogr returns a logr.Logger which writes to the slog.Handler. +// +// The logr verbosity level is mapped to slog levels such that V(0) becomes +// slog.LevelInfo and V(4) becomes slog.LevelDebug.
+func NewLogr(handler slog.Handler) logr.Logger { + if handler, ok := handler.(*slogHandler); ok { + if handler.sink == nil { + return logr.Discard() + } + return logr.New(handler.sink).V(int(handler.levelBias)) + } + return logr.New(&slogSink{handler: handler}) +} + +// NewSlogHandler returns a slog.Handler which writes to the same sink as the logr.Logger. +// +// The returned logger writes all records with level >= slog.LevelError as +// error log entries with LogSink.Error, regardless of the verbosity level of +// the logr.Logger: +// +// logger := <some logr.Logger with 0 as verbosity level> +// slog.New(NewSlogHandler(logger.V(10))).Error(...) -> logSink.Error(...) +// +// The level of all other records gets reduced by the verbosity +// level of the logr.Logger and the result is negated. If it happens +// to be negative, then it gets replaced by zero because a LogSink +// is not expected to handled negative levels: +// +// slog.New(NewSlogHandler(logger)).Debug(...) -> logger.GetSink().Info(level=4, ...) +// slog.New(NewSlogHandler(logger)).Warning(...) -> logger.GetSink().Info(level=0, ...) +// slog.New(NewSlogHandler(logger)).Info(...) -> logger.GetSink().Info(level=0, ...) +// slog.New(NewSlogHandler(logger.V(4))).Info(...) -> logger.GetSink().Info(level=4, ...) +func NewSlogHandler(logger logr.Logger) slog.Handler { + if sink, ok := logger.GetSink().(*slogSink); ok && logger.GetV() == 0 { + return sink.handler + } + + handler := &slogHandler{sink: logger.GetSink(), levelBias: slog.Level(logger.GetV())} + if slogSink, ok := handler.sink.(SlogSink); ok { + handler.slogSink = slogSink + } + return handler +} + +// SlogSink is an optional interface that a LogSink can implement to support +// logging through the slog.Logger or slog.Handler APIs better. It then should +// also support special slog values like slog.Group. When used as a +// slog.Handler, the advantages are: +// +// - stack unwinding gets avoided in favor of logging the pre-recorded PC, +// as intended by slog +// - proper grouping of key/value pairs via WithGroup +// - verbosity levels > slog.LevelInfo can be recorded +// - less overhead +// +// Both APIs (logr.Logger and slog.Logger/Handler) then are supported equally +// well. Developers can pick whatever API suits them better and/or mix +// packages which use either API in the same binary with a common logging +// implementation. +// +// This interface is necessary because the type implementing the LogSink +// interface cannot also implement the slog.Handler interface due to the +// different prototype of the common Enabled method. +// +// An implementation could support both interfaces in two different types, but then +// additional interfaces would be needed to convert between those types in NewLogr +// and NewSlogHandler. +type SlogSink interface { + logr.LogSink + + Handle(ctx context.Context, record slog.Record) error + WithAttrs(attrs []slog.Attr) SlogSink + WithGroup(name string) SlogSink +} diff --git a/vendor/github.com/go-logr/logr/slogr/slogsink.go b/vendor/github.com/go-logr/logr/slogr/slogsink.go new file mode 100644 index 0000000000..6fbac561d9 --- /dev/null +++ b/vendor/github.com/go-logr/logr/slogr/slogsink.go @@ -0,0 +1,122 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package slogr + +import ( + "context" + "log/slog" + "runtime" + "time" + + "github.com/go-logr/logr" +) + +var ( + _ logr.LogSink = &slogSink{} + _ logr.CallDepthLogSink = &slogSink{} + _ Underlier = &slogSink{} +) + +// Underlier is implemented by the LogSink returned by NewLogr. +type Underlier interface { + // GetUnderlying returns the Handler used by the LogSink. + GetUnderlying() slog.Handler +} + +const ( + // nameKey is used to log the `WithName` values as an additional attribute. + nameKey = "logger" + + // errKey is used to log the error parameter of Error as an additional attribute. + errKey = "err" +) + +type slogSink struct { + callDepth int + name string + handler slog.Handler +} + +func (l *slogSink) Init(info logr.RuntimeInfo) { + l.callDepth = info.CallDepth +} + +func (l *slogSink) GetUnderlying() slog.Handler { + return l.handler +} + +func (l *slogSink) WithCallDepth(depth int) logr.LogSink { + newLogger := *l + newLogger.callDepth += depth + return &newLogger +} + +func (l *slogSink) Enabled(level int) bool { + return l.handler.Enabled(context.Background(), slog.Level(-level)) +} + +func (l *slogSink) Info(level int, msg string, kvList ...interface{}) { + l.log(nil, msg, slog.Level(-level), kvList...) +} + +func (l *slogSink) Error(err error, msg string, kvList ...interface{}) { + l.log(err, msg, slog.LevelError, kvList...) +} + +func (l *slogSink) log(err error, msg string, level slog.Level, kvList ...interface{}) { + var pcs [1]uintptr + // skip runtime.Callers, this function, Info/Error, and all helper functions above that. + runtime.Callers(3+l.callDepth, pcs[:]) + + record := slog.NewRecord(time.Now(), level, msg, pcs[0]) + if l.name != "" { + record.AddAttrs(slog.String(nameKey, l.name)) + } + if err != nil { + record.AddAttrs(slog.Any(errKey, err)) + } + record.Add(kvList...) + l.handler.Handle(context.Background(), record) +} + +func (l slogSink) WithName(name string) logr.LogSink { + if l.name != "" { + l.name = l.name + "/" + } + l.name += name + return &l +} + +func (l slogSink) WithValues(kvList ...interface{}) logr.LogSink { + l.handler = l.handler.WithAttrs(kvListToAttrs(kvList...)) + return &l +} + +func kvListToAttrs(kvList ...interface{}) []slog.Attr { + // We don't need the record itself, only its Add method. + record := slog.NewRecord(time.Time{}, 0, "", 0) + record.Add(kvList...) 
+ attrs := make([]slog.Attr, 0, record.NumAttrs()) + record.Attrs(func(attr slog.Attr) bool { + attrs = append(attrs, attr) + return true + }) + return attrs +} diff --git a/vendor/github.com/go-openapi/jsonpointer/pointer.go b/vendor/github.com/go-openapi/jsonpointer/pointer.go index 7df9853def..de60dc7dd2 100644 --- a/vendor/github.com/go-openapi/jsonpointer/pointer.go +++ b/vendor/github.com/go-openapi/jsonpointer/pointer.go @@ -26,6 +26,7 @@ package jsonpointer import ( + "encoding/json" "errors" "fmt" "reflect" @@ -40,6 +41,7 @@ const ( pointerSeparator = `/` invalidStart = `JSON pointer must be empty or start with a "` + pointerSeparator + notFound = `Can't find the pointer in the document` ) var jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem() @@ -48,13 +50,13 @@ var jsonSetableType = reflect.TypeOf(new(JSONSetable)).Elem() // JSONPointable is an interface for structs to implement when they need to customize the // json pointer process type JSONPointable interface { - JSONLookup(string) (interface{}, error) + JSONLookup(string) (any, error) } // JSONSetable is an interface for structs to implement when they need to customize the // json pointer process type JSONSetable interface { - JSONSet(string, interface{}) error + JSONSet(string, any) error } // New creates a new json pointer for the given string @@ -81,9 +83,7 @@ func (p *Pointer) parse(jsonPointerString string) error { err = errors.New(invalidStart) } else { referenceTokens := strings.Split(jsonPointerString, pointerSeparator) - for _, referenceToken := range referenceTokens[1:] { - p.referenceTokens = append(p.referenceTokens, referenceToken) - } + p.referenceTokens = append(p.referenceTokens, referenceTokens[1:]...) } } @@ -91,26 +91,26 @@ func (p *Pointer) parse(jsonPointerString string) error { } // Get uses the pointer to retrieve a value from a JSON document -func (p *Pointer) Get(document interface{}) (interface{}, reflect.Kind, error) { +func (p *Pointer) Get(document any) (any, reflect.Kind, error) { return p.get(document, swag.DefaultJSONNameProvider) } // Set uses the pointer to set a value from a JSON document -func (p *Pointer) Set(document interface{}, value interface{}) (interface{}, error) { +func (p *Pointer) Set(document any, value any) (any, error) { return document, p.set(document, value, swag.DefaultJSONNameProvider) } // GetForToken gets a value for a json pointer token 1 level deep -func GetForToken(document interface{}, decodedToken string) (interface{}, reflect.Kind, error) { +func GetForToken(document any, decodedToken string) (any, reflect.Kind, error) { return getSingleImpl(document, decodedToken, swag.DefaultJSONNameProvider) } // SetForToken gets a value for a json pointer token 1 level deep -func SetForToken(document interface{}, decodedToken string, value interface{}) (interface{}, error) { +func SetForToken(document any, decodedToken string, value any) (any, error) { return document, setSingleImpl(document, value, decodedToken, swag.DefaultJSONNameProvider) } -func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) { +func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvider) (any, reflect.Kind, error) { rValue := reflect.Indirect(reflect.ValueOf(node)) kind := rValue.Kind() @@ -159,7 +159,7 @@ func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.Nam } -func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *swag.NameProvider) error { +func 
setSingleImpl(node, data any, decodedToken string, nameProvider *swag.NameProvider) error { rValue := reflect.Indirect(reflect.ValueOf(node)) if ns, ok := node.(JSONSetable); ok { // pointer impl @@ -210,7 +210,7 @@ func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *sw } -func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) { +func (p *Pointer) get(node any, nameProvider *swag.NameProvider) (any, reflect.Kind, error) { if nameProvider == nil { nameProvider = swag.DefaultJSONNameProvider @@ -241,7 +241,7 @@ func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interf return node, kind, nil } -func (p *Pointer) set(node, data interface{}, nameProvider *swag.NameProvider) error { +func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error { knd := reflect.ValueOf(node).Kind() if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array { @@ -363,6 +363,127 @@ func (p *Pointer) String() string { return pointerString } +func (p *Pointer) Offset(document string) (int64, error) { + dec := json.NewDecoder(strings.NewReader(document)) + var offset int64 + for _, ttk := range p.DecodedTokens() { + tk, err := dec.Token() + if err != nil { + return 0, err + } + switch tk := tk.(type) { + case json.Delim: + switch tk { + case '{': + offset, err = offsetSingleObject(dec, ttk) + if err != nil { + return 0, err + } + case '[': + offset, err = offsetSingleArray(dec, ttk) + if err != nil { + return 0, err + } + default: + return 0, fmt.Errorf("invalid token %#v", tk) + } + default: + return 0, fmt.Errorf("invalid token %#v", tk) + } + } + return offset, nil +} + +func offsetSingleObject(dec *json.Decoder, decodedToken string) (int64, error) { + for dec.More() { + offset := dec.InputOffset() + tk, err := dec.Token() + if err != nil { + return 0, err + } + switch tk := tk.(type) { + case json.Delim: + switch tk { + case '{': + if err := drainSingle(dec); err != nil { + return 0, err + } + case '[': + if err := drainSingle(dec); err != nil { + return 0, err + } + } + case string: + if tk == decodedToken { + return offset, nil + } + default: + return 0, fmt.Errorf("invalid token %#v", tk) + } + } + return 0, fmt.Errorf("token reference %q not found", decodedToken) +} + +func offsetSingleArray(dec *json.Decoder, decodedToken string) (int64, error) { + idx, err := strconv.Atoi(decodedToken) + if err != nil { + return 0, fmt.Errorf("token reference %q is not a number: %v", decodedToken, err) + } + var i int + for i = 0; i < idx && dec.More(); i++ { + tk, err := dec.Token() + if err != nil { + return 0, err + } + switch tk := tk.(type) { + case json.Delim: + switch tk { + case '{': + if err := drainSingle(dec); err != nil { + return 0, err + } + case '[': + if err := drainSingle(dec); err != nil { + return 0, err + } + } + } + } + if !dec.More() { + return 0, fmt.Errorf("token reference %q not found", decodedToken) + } + return dec.InputOffset(), nil +} + +// drainSingle drains a single level of object or array. +// The decoder has to guarantee the begining delim (i.e. '{' or '[') has been consumed. 
+func drainSingle(dec *json.Decoder) error { + for dec.More() { + tk, err := dec.Token() + if err != nil { + return err + } + switch tk := tk.(type) { + case json.Delim: + switch tk { + case '{': + if err := drainSingle(dec); err != nil { + return err + } + case '[': + if err := drainSingle(dec); err != nil { + return err + } + } + } + } + // Consumes the ending delim + if _, err := dec.Token(); err != nil { + return err + } + return nil +} + // Specific JSON pointer encoding here // ~0 => ~ // ~1 => / diff --git a/vendor/github.com/go-openapi/spec/.golangci.yml b/vendor/github.com/go-openapi/spec/.golangci.yml index 835d55e742..22f8d21cca 100644 --- a/vendor/github.com/go-openapi/spec/.golangci.yml +++ b/vendor/github.com/go-openapi/spec/.golangci.yml @@ -11,7 +11,7 @@ linters-settings: threshold: 200 goconst: min-len: 2 - min-occurrences: 2 + min-occurrences: 3 linters: enable-all: true @@ -40,3 +40,22 @@ linters: - tparallel - thelper - ifshort + - exhaustruct + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam + - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck + - structcheck + - golint + - nosnakecase diff --git a/vendor/github.com/go-openapi/spec/appveyor.yml b/vendor/github.com/go-openapi/spec/appveyor.yml deleted file mode 100644 index 0903593916..0000000000 --- a/vendor/github.com/go-openapi/spec/appveyor.yml +++ /dev/null @@ -1,32 +0,0 @@ -version: "0.1.{build}" - -clone_folder: C:\go-openapi\spec -shallow_clone: true # for startup speed -pull_requests: - do_not_increment_build_number: true - -#skip_tags: true -#skip_branch_with_pr: true - -# appveyor.yml -build: off - -environment: - GOPATH: c:\gopath - -stack: go 1.15 - -test_script: - - go test -v -timeout 20m ./... - -deploy: off - -notifications: - - provider: Slack - incoming_webhook: https://hooks.slack.com/services/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 - auth_token: - secure: Sf7kZf7ZGbnwWUMpffHwMu5A0cHkLK2MYY32LNTPj4+/3qC3Ghl7+9v4TSLOqOlCwdRNjOGblAq7s+GDJed6/xgRQl1JtCi1klzZNrYX4q01pgTPvvGcwbBkIYgeMaPeIRcK9OZnud7sRXdttozgTOpytps2U6Js32ip7uj5mHSg2ub0FwoSJwlS6dbezZ8+eDhoha0F/guY99BEwx8Bd+zROrT2TFGsSGOFGN6wFc7moCqTHO/YkWib13a2QNXqOxCCVBy/lt76Wp+JkeFppjHlzs/2lP3EAk13RIUAaesdEUHvIHrzCyNJEd3/+KO2DzsWOYfpktd+KBCvgaYOsoo7ubdT3IROeAegZdCgo/6xgCEsmFc9ZcqCfN5yNx2A+BZ2Vwmpws+bQ1E1+B5HDzzaiLcYfG4X2O210QVGVDLWsv1jqD+uPYeHY2WRfh5ZsIUFvaqgUEnwHwrK44/8REAhQavt1QAj5uJpsRd7CkRVPWRNK+yIky+wgbVUFEchRNmS55E7QWf+W4+4QZkQi7vUTMc9nbTUu2Es9NfvfudOpM2wZbn98fjpb/qq/nRv6Bk+ca+7XD5/IgNLMbWp2ouDdzbiHLCOfDUiHiDJhLfFZx9Bwo7ZwfzeOlbrQX66bx7xRKYmOe4DLrXhNcpbsMa8qbfxlZRCmYbubB/Y8h4= - channel: bots - on_build_success: false - on_build_failure: true - on_build_status_changed: true diff --git a/vendor/github.com/go-openapi/spec/expander.go b/vendor/github.com/go-openapi/spec/expander.go index d4ea889d44..012a4627cc 100644 --- a/vendor/github.com/go-openapi/spec/expander.go +++ b/vendor/github.com/go-openapi/spec/expander.go @@ -27,7 +27,6 @@ import ( // all relative $ref's will be resolved from there. // // PathLoader injects a document loading method. By default, this resolves to the function provided by the SpecLoader package variable. -// type ExpandOptions struct { RelativeBase string // the path to the root document to expand. 
This is a file, not a directory SkipSchemas bool // do not expand schemas, just paths, parameters and responses diff --git a/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go b/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go index 2df0723154..f19f1a8fb6 100644 --- a/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go +++ b/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go @@ -40,5 +40,5 @@ func repairURI(in string) (*url.URL, string) { return u, "" } -func fixWindowsURI(u *url.URL, in string) { +func fixWindowsURI(_ *url.URL, _ string) { } diff --git a/vendor/github.com/go-openapi/spec/operation.go b/vendor/github.com/go-openapi/spec/operation.go index 995ce6acb1..a69cca8814 100644 --- a/vendor/github.com/go-openapi/spec/operation.go +++ b/vendor/github.com/go-openapi/spec/operation.go @@ -217,9 +217,12 @@ func (o *Operation) AddParam(param *Parameter) *Operation { for i, p := range o.Parameters { if p.Name == param.Name && p.In == param.In { - params := append(o.Parameters[:i], *param) + params := make([]Parameter, 0, len(o.Parameters)+1) + params = append(params, o.Parameters[:i]...) + params = append(params, *param) params = append(params, o.Parameters[i+1:]...) o.Parameters = params + return o } } diff --git a/vendor/github.com/go-openapi/spec/parameter.go b/vendor/github.com/go-openapi/spec/parameter.go index 2b2b89b67b..bd4f1cdb07 100644 --- a/vendor/github.com/go-openapi/spec/parameter.go +++ b/vendor/github.com/go-openapi/spec/parameter.go @@ -84,27 +84,27 @@ type ParamProps struct { // Parameter a unique parameter is defined by a combination of a [name](#parameterName) and [location](#parameterIn). // // There are five possible parameter types. -// * Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part -// of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`, -// the path parameter is `itemId`. -// * Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`. -// * Header - Custom headers that are expected as part of the request. -// * Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be -// _one_ body parameter. The name of the body parameter has no effect on the parameter itself and is used for -// documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist -// together for the same operation. -// * Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or -// `multipart/form-data` are used as the content type of the request (in Swagger's definition, -// the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used -// to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be -// declared together with a body parameter for the same operation. Form parameters have a different format based on -// the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4). -// * `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload. -// For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple -// parameters that are being transferred. 
-// * `multipart/form-data` - each parameter takes a section in the payload with an internal header. -// For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is -// `submit-name`. This type of form parameters is more commonly used for file transfers. +// - Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part +// of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`, +// the path parameter is `itemId`. +// - Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`. +// - Header - Custom headers that are expected as part of the request. +// - Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be +// _one_ body parameter. The name of the body parameter has no effect on the parameter itself and is used for +// documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist +// together for the same operation. +// - Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or +// `multipart/form-data` are used as the content type of the request (in Swagger's definition, +// the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used +// to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be +// declared together with a body parameter for the same operation. Form parameters have a different format based on +// the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4). +// - `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload. +// For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple +// parameters that are being transferred. +// - `multipart/form-data` - each parameter takes a section in the payload with an internal header. +// For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is +// `submit-name`. This type of form parameters is more commonly used for file transfers. 
// // For more information: http://goo.gl/8us55a#parameterObject type Parameter struct { diff --git a/vendor/github.com/go-openapi/spec/spec.go b/vendor/github.com/go-openapi/spec/spec.go index 7d38b6e625..b3885034eb 100644 --- a/vendor/github.com/go-openapi/spec/spec.go +++ b/vendor/github.com/go-openapi/spec/spec.go @@ -26,7 +26,7 @@ import ( const ( // SwaggerSchemaURL the url for the swagger 2.0 schema to validate specs SwaggerSchemaURL = "http://swagger.io/v2/schema.json#" - // JSONSchemaURL the url for the json schema schema + // JSONSchemaURL the url for the json schema JSONSchemaURL = "http://json-schema.org/draft-04/schema#" ) diff --git a/vendor/github.com/go-openapi/spec/swagger.go b/vendor/github.com/go-openapi/spec/swagger.go index 44722ffd5a..1590fd1751 100644 --- a/vendor/github.com/go-openapi/spec/swagger.go +++ b/vendor/github.com/go-openapi/spec/swagger.go @@ -253,7 +253,7 @@ func (s SchemaOrBool) MarshalJSON() ([]byte, error) { // UnmarshalJSON converts this bool or schema object from a JSON structure func (s *SchemaOrBool) UnmarshalJSON(data []byte) error { var nw SchemaOrBool - if len(data) >= 4 { + if len(data) > 0 { if data[0] == '{' { var sch Schema if err := json.Unmarshal(data, &sch); err != nil { @@ -261,7 +261,7 @@ func (s *SchemaOrBool) UnmarshalJSON(data []byte) error { } nw.Schema = &sch } - nw.Allows = !(data[0] == 'f' && data[1] == 'a' && data[2] == 'l' && data[3] == 's' && data[4] == 'e') + nw.Allows = !bytes.Equal(data, []byte("false")) } *s = nw return nil diff --git a/vendor/github.com/go-openapi/strfmt/time.go b/vendor/github.com/go-openapi/strfmt/time.go index 9bef4c3b33..682235dae5 100644 --- a/vendor/github.com/go-openapi/strfmt/time.go +++ b/vendor/github.com/go-openapi/strfmt/time.go @@ -76,6 +76,8 @@ const ( ISO8601TimeWithReducedPrecisionLocaltime = "2006-01-02T15:04" // ISO8601TimeUniversalSortableDateTimePattern represents a ISO8601 universal sortable date time pattern. 
ISO8601TimeUniversalSortableDateTimePattern = "2006-01-02 15:04:05" + // short form of ISO8601TimeUniversalSortableDateTimePattern + ISO8601TimeUniversalSortableDateTimePatternShortForm = "2006-01-02" // DateTimePattern pattern to match for the date-time format from http://tools.ietf.org/html/rfc3339#section-5.6 DateTimePattern = `^([0-9]{2}):([0-9]{2}):([0-9]{2})(.[0-9]+)?(z|([+-][0-9]{2}:[0-9]{2}))$` ) @@ -84,7 +86,7 @@ var ( rxDateTime = regexp.MustCompile(DateTimePattern) // DateTimeFormats is the collection of formats used by ParseDateTime() - DateTimeFormats = []string{RFC3339Micro, RFC3339MicroNoColon, RFC3339Millis, RFC3339MillisNoColon, time.RFC3339, time.RFC3339Nano, ISO8601LocalTime, ISO8601TimeWithReducedPrecision, ISO8601TimeWithReducedPrecisionLocaltime, ISO8601TimeUniversalSortableDateTimePattern} + DateTimeFormats = []string{RFC3339Micro, RFC3339MicroNoColon, RFC3339Millis, RFC3339MillisNoColon, time.RFC3339, time.RFC3339Nano, ISO8601LocalTime, ISO8601TimeWithReducedPrecision, ISO8601TimeWithReducedPrecisionLocaltime, ISO8601TimeUniversalSortableDateTimePattern, ISO8601TimeUniversalSortableDateTimePatternShortForm} // MarshalFormat sets the time resolution format used for marshaling time (set to milliseconds) MarshalFormat = RFC3339Millis diff --git a/vendor/github.com/go-openapi/validate/.golangci.yml b/vendor/github.com/go-openapi/validate/.golangci.yml index 81818ca678..348b9b45fa 100644 --- a/vendor/github.com/go-openapi/validate/.golangci.yml +++ b/vendor/github.com/go-openapi/validate/.golangci.yml @@ -39,7 +39,12 @@ linters: - tparallel - paralleltest - cyclop # because we have gocyclo already + - depguard # we do not add a config for this # TODO: review the linters below. We disabled them to make the CI pass first. + - nonamedreturns + - exhaustruct + - nosnakecase + - nolintlint - ireturn - varnamelen - forcetypeassert diff --git a/vendor/github.com/go-openapi/validate/appveyor.yml b/vendor/github.com/go-openapi/validate/appveyor.yml deleted file mode 100644 index 89e5bccb3a..0000000000 --- a/vendor/github.com/go-openapi/validate/appveyor.yml +++ /dev/null @@ -1,32 +0,0 @@ -version: "0.1.{build}" - -clone_folder: C:\go-openapi\validate -shallow_clone: true # for startup speed -pull_requests: - do_not_increment_build_number: true - -#skip_tags: true -#skip_branch_with_pr: true - -# appveyor.yml -build: off - -environment: - GOPATH: c:\gopath - -stack: go 1.15 - -test_script: - - go test -v -timeout 20m -args -enable-long ./... 
- -deploy: off - -notifications: - - provider: Slack - incoming_webhook: https://hooks.slack.com/services/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 - auth_token: - secure: Sf7kZf7ZGbnwWUMpffHwMu5A0cHkLK2MYY32LNTPj4+/3qC3Ghl7+9v4TSLOqOlCwdRNjOGblAq7s+GDJed6/xgRQl1JtCi1klzZNrYX4q01pgTPvvGcwbBkIYgeMaPeIRcK9OZnud7sRXdttozgTOpytps2U6Js32ip7uj5mHSg2ub0FwoSJwlS6dbezZ8+eDhoha0F/guY99BEwx8Bd+zROrT2TFGsSGOFGN6wFc7moCqTHO/YkWib13a2QNXqOxCCVBy/lt76Wp+JkeFppjHlzs/2lP3EAk13RIUAaesdEUHvIHrzCyNJEd3/+KO2DzsWOYfpktd+KBCvgaYOsoo7ubdT3IROeAegZdCgo/6xgCEsmFc9ZcqCfN5yNx2A+BZ2Vwmpws+bQ1E1+B5HDzzaiLcYfG4X2O210QVGVDLWsv1jqD+uPYeHY2WRfh5ZsIUFvaqgUEnwHwrK44/8REAhQavt1QAj5uJpsRd7CkRVPWRNK+yIky+wgbVUFEchRNmS55E7QWf+W4+4QZkQi7vUTMc9nbTUu2Es9NfvfudOpM2wZbn98fjpb/qq/nRv6Bk+ca+7XD5/IgNLMbWp2ouDdzbiHLCOfDUiHiDJhLfFZx9Bwo7ZwfzeOlbrQX66bx7xRKYmOe4DLrXhNcpbsMa8qbfxlZRCmYbubB/Y8h4= - channel: bots - on_build_success: false - on_build_failure: true - on_build_status_changed: true diff --git a/vendor/github.com/go-openapi/validate/default_validator.go b/vendor/github.com/go-openapi/validate/default_validator.go index bd14c2a269..670a1773ac 100644 --- a/vendor/github.com/go-openapi/validate/default_validator.go +++ b/vendor/github.com/go-openapi/validate/default_validator.go @@ -170,7 +170,7 @@ func (d *defaultValidator) validateDefaultInResponse(resp *spec.Response, respon responseName, responseCodeAsStr := responseHelp.responseMsgVariants(responseType, responseCode) - // nolint: dupl + //nolint: dupl if response.Headers != nil { // Safeguard for nm, h := range response.Headers { // reset explored schemas to get depth-first recursive-proof exploration @@ -262,6 +262,7 @@ func (d *defaultValidator) validateDefaultValueSchemaAgainstSchema(path, in stri } // TODO: Temporary duplicated code. Need to refactor with examples + // nolint: dupl func (d *defaultValidator) validateDefaultValueItemsAgainstSchema(path, in string, root interface{}, items *spec.Items) *Result { res := new(Result) diff --git a/vendor/github.com/go-openapi/validate/doc.go b/vendor/github.com/go-openapi/validate/doc.go index f5ca9a5d58..d2b901eab9 100644 --- a/vendor/github.com/go-openapi/validate/doc.go +++ b/vendor/github.com/go-openapi/validate/doc.go @@ -19,7 +19,7 @@ as well as tools to validate data against their schema. This package follows Swagger 2.0. specification (aka OpenAPI 2.0). Reference can be found here: https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md. -Validating a specification +# Validating a specification Validates a spec document (from JSON or YAML) against the JSON schema for swagger, then checks a number of extra rules that can't be expressed in JSON schema. 
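For a sense of how the `Spec()` entry point documented here is typically called from consumer code, a minimal sketch follows; the `swagger.json` path is only a placeholder for illustration and is not part of this change:

```go
package main

import (
	"log"

	"github.com/go-openapi/loads"
	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/validate"
)

func main() {
	// Load an OpenAPI 2.0 (Swagger) document; "swagger.json" is a placeholder path.
	doc, err := loads.Spec("swagger.json")
	if err != nil {
		log.Fatalf("loading spec: %v", err)
	}

	// validate.Spec runs the JSON-schema check plus the extra rules listed above,
	// flattening all validation messages into a single error when the spec is invalid.
	if err := validate.Spec(doc, strfmt.Default); err != nil {
		log.Fatalf("spec is invalid: %v", err)
	}
	log.Println("spec is valid")
}
```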
@@ -30,34 +30,36 @@ Entry points: - SpecValidator.Validate() Reported as errors: - [x] definition can't declare a property that's already defined by one of its ancestors - [x] definition's ancestor can't be a descendant of the same model - [x] path uniqueness: each api path should be non-verbatim (account for path param names) unique per method - [x] each security reference should contain only unique scopes - [x] each security scope in a security definition should be unique - [x] parameters in path must be unique - [x] each path parameter must correspond to a parameter placeholder and vice versa - [x] each referenceable definition must have references - [x] each definition property listed in the required array must be defined in the properties of the model - [x] each parameter should have a unique `name` and `type` combination - [x] each operation should have only 1 parameter of type body - [x] each reference must point to a valid object - [x] every default value that is specified must validate against the schema for that property - [x] items property is required for all schemas/definitions of type `array` - [x] path parameters must be declared a required - [x] headers must not contain $ref - [x] schema and property examples provided must validate against their respective object's schema - [x] examples provided must validate their schema + + [x] definition can't declare a property that's already defined by one of its ancestors + [x] definition's ancestor can't be a descendant of the same model + [x] path uniqueness: each api path should be non-verbatim (account for path param names) unique per method. Validation can be laxed by disabling StrictPathParamUniqueness. + [x] each security reference should contain only unique scopes + [x] each security scope in a security definition should be unique + [x] parameters in path must be unique + [x] each path parameter must correspond to a parameter placeholder and vice versa + [x] each referenceable definition must have references + [x] each definition property listed in the required array must be defined in the properties of the model + [x] each parameter should have a unique `name` and `type` combination + [x] each operation should have only 1 parameter of type body + [x] each reference must point to a valid object + [x] every default value that is specified must validate against the schema for that property + [x] items property is required for all schemas/definitions of type `array` + [x] path parameters must be declared a required + [x] headers must not contain $ref + [x] schema and property examples provided must validate against their respective object's schema + [x] examples provided must validate their schema Reported as warnings: - [x] path parameters should not contain any of [{,},\w] - [x] empty path - [x] unused definitions - [x] unsupported validation of examples on non-JSON media types - [x] examples in response without schema - [x] readOnly properties should not be required -Validating a schema + [x] path parameters should not contain any of [{,},\w] + [x] empty path + [x] unused definitions + [x] unsupported validation of examples on non-JSON media types + [x] examples in response without schema + [x] readOnly properties should not be required + +# Validating a schema The schema validation toolkit validates data against JSON-schema-draft 04 schema. @@ -70,16 +72,16 @@ Entry points: - AgainstSchema() - ... 
-Known limitations +# Known limitations With the current version of this package, the following aspects of swagger are not yet supported: - [ ] errors and warnings are not reported with key/line number in spec - [ ] default values and examples on responses only support application/json producer type - [ ] invalid numeric constraints (such as Minimum, etc..) are not checked except for default and example values - [ ] rules for collectionFormat are not implemented - [ ] no validation rule for polymorphism support (discriminator) [not done here] - [ ] valid js ECMA regexp not supported by Go regexp engine are considered invalid - [ ] arbitrary large numbers are not supported: max is math.MaxFloat64 + [ ] errors and warnings are not reported with key/line number in spec + [ ] default values and examples on responses only support application/json producer type + [ ] invalid numeric constraints (such as Minimum, etc..) are not checked except for default and example values + [ ] rules for collectionFormat are not implemented + [ ] no validation rule for polymorphism support (discriminator) [not done here] + [ ] valid js ECMA regexp not supported by Go regexp engine are considered invalid + [ ] arbitrary large numbers are not supported: max is math.MaxFloat64 */ package validate diff --git a/vendor/github.com/go-openapi/validate/example_validator.go b/vendor/github.com/go-openapi/validate/example_validator.go index c8bffd78e5..930b47e530 100644 --- a/vendor/github.com/go-openapi/validate/example_validator.go +++ b/vendor/github.com/go-openapi/validate/example_validator.go @@ -48,7 +48,6 @@ func (ex *exampleValidator) isVisited(path string) bool { // - schemas // - individual property // - responses -// func (ex *exampleValidator) Validate() (errs *Result) { errs = new(Result) if ex == nil || ex.SpecValidator == nil { diff --git a/vendor/github.com/go-openapi/validate/helpers.go b/vendor/github.com/go-openapi/validate/helpers.go index 48ebfab58e..dc376f7f93 100644 --- a/vendor/github.com/go-openapi/validate/helpers.go +++ b/vendor/github.com/go-openapi/validate/helpers.go @@ -250,7 +250,7 @@ func (h *paramHelper) resolveParam(path, method, operationID string, param *spec } if err != nil { // Safeguard - // NOTE: we may enter enter here when the whole parameter is an unresolved $ref + // NOTE: we may enter here when the whole parameter is an unresolved $ref refPath := strings.Join([]string{"\"" + path + "\"", method}, ".") errorHelp.addPointerError(res, err, param.Ref.String(), refPath) return nil, res diff --git a/vendor/github.com/go-openapi/validate/options.go b/vendor/github.com/go-openapi/validate/options.go index deeec2f2ec..8a22ce9911 100644 --- a/vendor/github.com/go-openapi/validate/options.go +++ b/vendor/github.com/go-openapi/validate/options.go @@ -21,10 +21,28 @@ import "sync" // NOTE: other options might be needed, for example a go-swagger specific mode. type Opts struct { ContinueOnErrors bool // true: continue reporting errors, even if spec is invalid + + // StrictPathParamUniqueness enables a strict validation of paths that include + // path parameters. When true, it will enforce that for each method, the path + // is unique, regardless of path parameters such that GET:/petstore/{id} and + // GET:/petstore/{pet} anre considered duplicate paths. + // + // Consider disabling if path parameters can include slashes such as + // GET:/v1/{shelve} and GET:/v1/{book}, where the IDs are "shelve/*" and + // /"shelve/*/book/*" respectively. 
+ StrictPathParamUniqueness bool } var ( - defaultOpts = Opts{ContinueOnErrors: false} // default is to stop validation on errors + defaultOpts = Opts{ + // default is to stop validation on errors + ContinueOnErrors: false, + + // StrictPathParamUniqueness is defaulted to true. This maintains existing + // behavior. + StrictPathParamUniqueness: true, + } + defaultOptsMutex = &sync.Mutex{} ) diff --git a/vendor/github.com/go-openapi/validate/schema.go b/vendor/github.com/go-openapi/validate/schema.go index b817eb0ef3..62b91dc5b0 100644 --- a/vendor/github.com/go-openapi/validate/schema.go +++ b/vendor/github.com/go-openapi/validate/schema.go @@ -101,7 +101,7 @@ func (s *SchemaValidator) SetPath(path string) { } // Applies returns true when this schema validator applies -func (s *SchemaValidator) Applies(source interface{}, kind reflect.Kind) bool { +func (s *SchemaValidator) Applies(source interface{}, _ reflect.Kind) bool { _, ok := source.(*spec.Schema) return ok } diff --git a/vendor/github.com/go-openapi/validate/spec.go b/vendor/github.com/go-openapi/validate/spec.go index dff01f00be..5b867dd59f 100644 --- a/vendor/github.com/go-openapi/validate/spec.go +++ b/vendor/github.com/go-openapi/validate/spec.go @@ -32,17 +32,16 @@ import ( // // Returns an error flattening in a single standard error, all validation messages. // -// - TODO: $ref should not have siblings -// - TODO: make sure documentation reflects all checks and warnings -// - TODO: check on discriminators -// - TODO: explicit message on unsupported keywords (better than "forbidden property"...) -// - TODO: full list of unresolved refs -// - TODO: validate numeric constraints (issue#581): this should be handled like defaults and examples -// - TODO: option to determine if we validate for go-swagger or in a more general context -// - TODO: check on required properties to support anyOf, allOf, oneOf +// - TODO: $ref should not have siblings +// - TODO: make sure documentation reflects all checks and warnings +// - TODO: check on discriminators +// - TODO: explicit message on unsupported keywords (better than "forbidden property"...) +// - TODO: full list of unresolved refs +// - TODO: validate numeric constraints (issue#581): this should be handled like defaults and examples +// - TODO: option to determine if we validate for go-swagger or in a more general context +// - TODO: check on required properties to support anyOf, allOf, oneOf // // NOTE: SecurityScopes are maps: no need to check uniqueness -// func Spec(doc *loads.Document, formats strfmt.Registry) error { errs, _ /*warns*/ := NewSpecValidator(doc.Schema(), formats).Validate(doc) if errs.HasErrors() { @@ -615,7 +614,7 @@ func (s *SpecValidator) validateRequiredProperties(path, in string, v *spec.Sche func (s *SpecValidator) validateParameters() *Result { // - for each method, path is unique, regardless of path parameters // e.g. GET:/petstore/{id}, GET:/petstore/{pet}, GET:/petstore are - // considered duplicate paths + // considered duplicate paths, if StrictPathParamUniqueness is enabled. 
// - each parameter should have a unique `name` and `type` combination // - each operation should have only 1 parameter of type body // - there must be at most 1 parameter in body @@ -627,28 +626,30 @@ func (s *SpecValidator) validateParameters() *Result { for method, pi := range s.expandedAnalyzer().Operations() { methodPaths := make(map[string]map[string]string) for path, op := range pi { - pathToAdd := pathHelp.stripParametersInPath(path) + if s.Options.StrictPathParamUniqueness { + pathToAdd := pathHelp.stripParametersInPath(path) - // Warn on garbled path afer param stripping - if rexGarbledPathSegment.MatchString(pathToAdd) { - res.AddWarnings(pathStrippedParamGarbledMsg(pathToAdd)) - } + // Warn on garbled path afer param stripping + if rexGarbledPathSegment.MatchString(pathToAdd) { + res.AddWarnings(pathStrippedParamGarbledMsg(pathToAdd)) + } - // Check uniqueness of stripped paths - if _, found := methodPaths[method][pathToAdd]; found { + // Check uniqueness of stripped paths + if _, found := methodPaths[method][pathToAdd]; found { - // Sort names for stable, testable output - if strings.Compare(path, methodPaths[method][pathToAdd]) < 0 { - res.AddErrors(pathOverlapMsg(path, methodPaths[method][pathToAdd])) + // Sort names for stable, testable output + if strings.Compare(path, methodPaths[method][pathToAdd]) < 0 { + res.AddErrors(pathOverlapMsg(path, methodPaths[method][pathToAdd])) + } else { + res.AddErrors(pathOverlapMsg(methodPaths[method][pathToAdd], path)) + } } else { - res.AddErrors(pathOverlapMsg(methodPaths[method][pathToAdd], path)) - } - } else { - if _, found := methodPaths[method]; !found { - methodPaths[method] = map[string]string{} - } - methodPaths[method][pathToAdd] = path // Original non stripped path + if _, found := methodPaths[method]; !found { + methodPaths[method] = map[string]string{} + } + methodPaths[method][pathToAdd] = path // Original non stripped path + } } var bodyParams []string diff --git a/vendor/github.com/go-openapi/validate/spec_messages.go b/vendor/github.com/go-openapi/validate/spec_messages.go index b3757adddb..5398679bff 100644 --- a/vendor/github.com/go-openapi/validate/spec_messages.go +++ b/vendor/github.com/go-openapi/validate/spec_messages.go @@ -349,9 +349,10 @@ func parameterValidationTypeMismatchMsg(param, path, typ string) errors.Error { } // disabled -// func invalidResponseDefinitionAsSchemaMsg(path, method string) errors.Error { -// return errors.New(errors.CompositeErrorCode, InvalidResponseDefinitionAsSchemaError, path, method) -// } +// +// func invalidResponseDefinitionAsSchemaMsg(path, method string) errors.Error { +// return errors.New(errors.CompositeErrorCode, InvalidResponseDefinitionAsSchemaError, path, method) +// } func someParametersBrokenMsg(path, method, operationID string) errors.Error { return errors.New(errors.CompositeErrorCode, SomeParametersBrokenError, path, method, operationID) } diff --git a/vendor/github.com/go-openapi/validate/validator.go b/vendor/github.com/go-openapi/validate/validator.go index 38cdb9bb6c..ee01f2a7b7 100644 --- a/vendor/github.com/go-openapi/validate/validator.go +++ b/vendor/github.com/go-openapi/validate/validator.go @@ -148,7 +148,7 @@ func (b *basicCommonValidator) SetPath(path string) { b.Path = path } -func (b *basicCommonValidator) Applies(source interface{}, kind reflect.Kind) bool { +func (b *basicCommonValidator) Applies(source interface{}, _ reflect.Kind) bool { switch source.(type) { case *spec.Parameter, *spec.Schema, *spec.Header: return true diff --git 
a/vendor/github.com/go-openapi/validate/values.go b/vendor/github.com/go-openapi/validate/values.go index e7ad8c1033..080fe21324 100644 --- a/vendor/github.com/go-openapi/validate/values.go +++ b/vendor/github.com/go-openapi/validate/values.go @@ -120,7 +120,7 @@ func UniqueItems(path, in string, data interface{}) *errors.Validation { // MinLength validates a string for minimum length func MinLength(path, in, data string, minLength int64) *errors.Validation { - strLen := int64(utf8.RuneCount([]byte(data))) + strLen := int64(utf8.RuneCountInString(data)) if strLen < minLength { return errors.TooShort(path, in, minLength, data) } @@ -129,7 +129,7 @@ func MinLength(path, in, data string, minLength int64) *errors.Validation { // MaxLength validates a string for maximum length func MaxLength(path, in, data string, maxLength int64) *errors.Validation { - strLen := int64(utf8.RuneCount([]byte(data))) + strLen := int64(utf8.RuneCountInString(data)) if strLen > maxLength { return errors.TooLong(path, in, maxLength, data) } diff --git a/vendor/github.com/google/gopacket/.gitignore b/vendor/github.com/google/gopacket/.gitignore new file mode 100644 index 0000000000..149266fdb6 --- /dev/null +++ b/vendor/github.com/google/gopacket/.gitignore @@ -0,0 +1,38 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +#* +*~ + +# examples binaries +examples/synscan/synscan +examples/pfdump/pfdump +examples/pcapdump/pcapdump +examples/httpassembly/httpassembly +examples/statsassembly/statsassembly +examples/arpscan/arpscan +examples/bidirectional/bidirectional +examples/bytediff/bytediff +examples/reassemblydump/reassemblydump +layers/gen +macs/gen +pcap/pcap_tester diff --git a/vendor/github.com/google/gopacket/.travis.gofmt.sh b/vendor/github.com/google/gopacket/.travis.gofmt.sh new file mode 100644 index 0000000000..e341a1cb78 --- /dev/null +++ b/vendor/github.com/google/gopacket/.travis.gofmt.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +cd "$(dirname $0)" +if [ -n "$(go fmt ./...)" ]; then + echo "Go code is not formatted, run 'go fmt github.com/google/stenographer/...'" >&2 + exit 1 +fi diff --git a/vendor/github.com/google/gopacket/.travis.golint.sh b/vendor/github.com/google/gopacket/.travis.golint.sh new file mode 100644 index 0000000000..0e267f5216 --- /dev/null +++ b/vendor/github.com/google/gopacket/.travis.golint.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +cd "$(dirname $0)" + +go get golang.org/x/lint/golint +DIRS=". tcpassembly tcpassembly/tcpreader ip4defrag reassembly macs pcapgo pcap afpacket pfring routing defrag/lcmdefrag" +# Add subdirectories here as we clean up golint on each. 
+for subdir in $DIRS; do + pushd $subdir + if golint | + grep -v CannotSetRFMon | # pcap exported error name + grep -v DataLost | # tcpassembly/tcpreader exported error name + grep .; then + exit 1 + fi + popd +done + +pushd layers +for file in *.go; do + if cat .lint_blacklist | grep -q $file; then + echo "Skipping lint of $file due to .lint_blacklist" + elif golint $file | grep .; then + echo "Lint error in file $file" + exit 1 + fi +done +popd diff --git a/vendor/github.com/google/gopacket/.travis.govet.sh b/vendor/github.com/google/gopacket/.travis.govet.sh new file mode 100644 index 0000000000..a5c13544ca --- /dev/null +++ b/vendor/github.com/google/gopacket/.travis.govet.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +cd "$(dirname $0)" +DIRS=". layers pcap pcapgo tcpassembly tcpassembly/tcpreader routing ip4defrag bytediff macs defrag/lcmdefrag" +set -e +for subdir in $DIRS; do + pushd $subdir + go vet + popd +done diff --git a/vendor/github.com/google/gopacket/.travis.install.sh b/vendor/github.com/google/gopacket/.travis.install.sh new file mode 100644 index 0000000000..648c901638 --- /dev/null +++ b/vendor/github.com/google/gopacket/.travis.install.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +set -ev + +go get github.com/google/gopacket +go get github.com/google/gopacket/layers +go get github.com/google/gopacket/tcpassembly +go get github.com/google/gopacket/reassembly +go get github.com/google/gopacket/pcapgo diff --git a/vendor/github.com/google/gopacket/.travis.script.sh b/vendor/github.com/google/gopacket/.travis.script.sh new file mode 100644 index 0000000000..a483f4f7c6 --- /dev/null +++ b/vendor/github.com/google/gopacket/.travis.script.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +set -ev + +go test github.com/google/gopacket +go test github.com/google/gopacket/layers +go test github.com/google/gopacket/tcpassembly +go test github.com/google/gopacket/reassembly +go test github.com/google/gopacket/pcapgo +go test github.com/google/gopacket/pcap diff --git a/vendor/github.com/google/gopacket/.travis.yml b/vendor/github.com/google/gopacket/.travis.yml new file mode 100644 index 0000000000..84f1f4945a --- /dev/null +++ b/vendor/github.com/google/gopacket/.travis.yml @@ -0,0 +1,57 @@ +language: go +go: + - 1.11.x + - 1.12.x + - 1.13.x + - master + +addons: + apt: + packages: + libpcap-dev + +# use modules except for older versions (see below) +install: true + +env: + - GO111MODULE=on + +script: ./.travis.script.sh + +matrix: + fast_finish: true + allow_failures: + - go: master + +jobs: + include: + - go: 1.5.x + install: ./.travis.install.sh + - go: 1.6.x + install: ./.travis.install.sh + - go: 1.7.x + install: ./.travis.install.sh + - go: 1.8.x + install: ./.travis.install.sh + - go: 1.9.x + install: ./.travis.install.sh + - go: 1.10.x + install: ./.travis.install.sh + - os: osx + go: 1.x +# windows doesn't work on travis (package installation just hangs and then errors out) +# - os: windows +# go: 1.x +# # We don't need nmap - but that's the only way to get npcap: +# before_install: choco install npcap --version 0.86 -y + - stage: style + name: "fmt/vet/lint" + go: 1.x + script: + - ./.travis.gofmt.sh + - ./.travis.govet.sh + - ./.travis.golint.sh + +stages: + - style + - test diff --git a/vendor/github.com/google/gopacket/AUTHORS b/vendor/github.com/google/gopacket/AUTHORS new file mode 100644 index 0000000000..24e834e451 --- /dev/null +++ b/vendor/github.com/google/gopacket/AUTHORS @@ -0,0 +1,54 @@ +AUTHORS AND MAINTAINERS: + +MAIN DEVELOPERS: +Graeme Connell + +AUTHORS: +Nigel Tao +Cole Mickens +Ben 
Daglish +Luis Martinez +Remco Verhoef +Hiroaki Kawai +Lukas Lueg +Laurent Hausermann +Bill Green +Christian Mäder +Gernot Vormayr +Vitor Garcia Graveto +Elias Chavarria Reyes +Daniel Rittweiler + +CONTRIBUTORS: +Attila Oláh +Vittus Mikiassen +Matthias Radestock +Matthew Sackman +Loic Prylli +Alexandre Fiori +Adrian Tam +Satoshi Matsumoto +David Stainton +Jesse Ward +Kane Mathers +Jose Selvi +Yerden Zhumabekov +Jensen Hwa + +----------------------------------------------- +FORKED FROM github.com/akrennmair/gopcap +ALL THE FOLLOWING ARE FOR THAT PROJECT + +MAIN DEVELOPERS: +Andreas Krennmair + +CONTRIBUTORS: +Andrea Nall +Daniel Arndt +Dustin Sallings +Graeme Connell +Guillaume Savary +Mark Smith +Miek Gieben +Mike Bell +Trevor Strohman diff --git a/vendor/github.com/google/gopacket/CONTRIBUTING.md b/vendor/github.com/google/gopacket/CONTRIBUTING.md new file mode 100644 index 0000000000..99ab7a2e4f --- /dev/null +++ b/vendor/github.com/google/gopacket/CONTRIBUTING.md @@ -0,0 +1,215 @@ +Contributing To gopacket +======================== + +So you've got some code and you'd like it to be part of gopacket... wonderful! +We're happy to accept contributions, whether they're fixes to old protocols, new +protocols entirely, or anything else you think would improve the gopacket +library. This document is designed to help you to do just that. + +The first section deals with the plumbing: how to actually get a change +submitted. + +The second section deals with coding style... Go is great in that it +has a uniform style implemented by 'go fmt', but there's still some decisions +we've made that go above and beyond, and if you follow them, they won't come up +in your code review. + +The third section deals with some of the implementation decisions we've made, +which may help you to understand the current code and which we may ask you to +conform to (or provide compelling reasons for ignoring). + +Overall, we hope this document will help you to understand our system and write +great code which fits in, and help us to turn around on your code review quickly +so the code can make it into the master branch as quickly as possible. + + +How To Submit Code +------------------ + +We use github.com's Pull Request feature to receive code contributions from +external contributors. See +https://help.github.com/articles/creating-a-pull-request/ for details on +how to create a request. + +Also, there's a local script `gc` in the base directory of GoPacket that +runs a local set of checks, which should give you relatively high confidence +that your pull won't fail github pull checks. + +```sh +go get github.com/google/gopacket +cd $GOROOT/src/pkg/github.com/google/gopacket +git checkout -b # create a new branch to work from +... code code code ... +./gc # Run this to do local commits, it performs a number of checks +``` + +To sum up: + +* DO + + Pull down the latest version. + + Make a feature-specific branch. + + Code using the style and methods discussed in the rest of this document. + + Use the ./gc command to do local commits or check correctness. + + Push your new feature branch up to github.com, as a pull request. + + Handle comments and requests from reviewers, pushing new commits up to + your feature branch as problems are addressed. + + Put interesting comments and discussions into commit comments. +* DON'T + + Push to someone else's branch without their permission. 
+ + +Coding Style +------------ + +* Go code must be run through `go fmt`, `go vet`, and `golint` +* Follow http://golang.org/doc/effective_go.html as much as possible. + + In particular, http://golang.org/doc/effective_go.html#mixed-caps. Enums + should be be CamelCase, with acronyms capitalized (TCPSourcePort, vs. + TcpSourcePort or TCP_SOURCE_PORT). +* Bonus points for giving enum types a String() field. +* Any exported types or functions should have commentary + (http://golang.org/doc/effective_go.html#commentary) + + +Coding Methods And Implementation Notes +--------------------------------------- + +### Error Handling + +Many times, you'll be decoding a protocol and run across something bad, a packet +corruption or the like. How do you handle this? First off, ALWAYS report the +error. You can do this either by returning the error from the decode() function +(most common), or if you're up for it you can implement and add an ErrorLayer +through the packet builder (the first method is a simple shortcut that does +exactly this, then stops any future decoding). + +Often, you'll already have decode some part of your protocol by the time you hit +your error. Use your own discretion to determine whether the stuff you've +already decoded should be returned to the caller or not: + +```go +func decodeMyProtocol(data []byte, p gopacket.PacketBuilder) error { + prot := &MyProtocol{} + if len(data) < 10 { + // This error occurred before we did ANYTHING, so there's nothing in my + // protocol that the caller could possibly want. Just return the error. + return fmt.Errorf("Length %d less than 10", len(data)) + } + prot.ImportantField1 = data[:5] + prot.ImportantField2 = data[5:10] + // At this point, we've already got enough information in 'prot' to + // warrant returning it to the caller, so we'll add it now. + p.AddLayer(prot) + if len(data) < 15 { + // We encountered an error later in the packet, but the caller already + // has the important info we've gleaned so far. + return fmt.Errorf("Length %d less than 15", len(data)) + } + prot.ImportantField3 = data[10:15] + return nil // We've already added the layer, we can just return success. +} +``` + +In general, our code follows the approach of returning the first error it +encounters. In general, we don't trust any bytes after the first error we see. + +### What Is A Layer? + +The definition of a layer is up to the discretion of the coder. It should be +something important enough that it's actually useful to the caller (IE: every +TLV value should probably NOT be a layer). However, it can be more granular +than a single protocol... IPv6 and SCTP both implement many layers to handle the +various parts of the protocol. Use your best judgement, and prepare to defend +your decisions during code review. ;) + +### Performance + +We strive to make gopacket as fast as possible while still providing lots of +features. In general, this means: + +* Focus performance tuning on common protocols (IP4/6, TCP, etc), and optimize + others on an as-needed basis (tons of MPLS on your network? Time to optimize + MPLS!) +* Use fast operations. See the toplevel benchmark_test for benchmarks of some + of Go's underlying features and types. +* Test your performance changes! You should use the ./gc script's --benchmark + flag to submit any performance-related changes. Use pcap/gopacket_benchmark + to test your change against a PCAP file based on your traffic patterns. +* Don't be TOO hacky. 
Sometimes, removing an unused struct from a field causes + a huge performance hit, due to the way that Go currently handles its segmented + stack... don't be afraid to clean it up anyway. We'll trust the Go compiler + to get good enough over time to handle this. Also, this type of + compiler-specific optimization is very fragile; someone adding a field to an + entirely different struct elsewhere in the codebase could reverse any gains + you might achieve by aligning your allocations. +* Try to minimize memory allocations. If possible, use []byte to reference + pieces of the input, instead of using string, which requires copying the bytes + into a new memory allocation. +* Think hard about what should be evaluated lazily vs. not. In general, a + layer's struct should almost exactly mirror the layer's frame. Anything + that's more interesting should be a function. This may not always be + possible, but it's a good rule of thumb. +* Don't fear micro-optimizations. With the above in mind, we welcome + micro-optimizations that we think will have positive/neutral impacts on the + majority of workloads. A prime example of this is pre-allocating certain + structs within a larger one: + +```go +type MyProtocol struct { + // Most packets have 1-4 of VeryCommon, so we preallocate it here. + initialAllocation [4]uint32 + VeryCommon []uint32 +} + +func decodeMyProtocol(data []byte, p gopacket.PacketBuilder) error { + prot := &MyProtocol{} + prot.VeryCommon = proto.initialAllocation[:0] + for len(data) > 4 { + field := binary.BigEndian.Uint32(data[:4]) + data = data[4:] + // Since we're using the underlying initialAllocation, we won't need to + // allocate new memory for the following append unless we more than 16 + // bytes of data, which should be the uncommon case. + prot.VeryCommon = append(prot.VeryCommon, field) + } + p.AddLayer(prot) + if len(data) > 0 { + return fmt.Errorf("MyProtocol packet has %d bytes left after decoding", len(data)) + } + return nil +} +``` + +### Slices And Data + +If you're pulling a slice from the data you're decoding, don't copy it. Just +use the slice itself. + +```go +type MyProtocol struct { + A, B net.IP +} +func decodeMyProtocol(data []byte, p gopacket.PacketBuilder) error { + p.AddLayer(&MyProtocol{ + A: data[:4], + B: data[4:8], + }) + return nil +} +``` + +The caller has already agreed, by using this library, that they won't modify the +set of bytes they pass in to the decoder, or the library has already copied the +set of bytes to a read-only location. See DecodeOptions.NoCopy for more +information. + +### Enums/Types + +If a protocol has an integer field (uint8, uint16, etc) with a couple of known +values that mean something special, make it a type. This allows us to do really +nice things like adding a String() function to them, so we can more easily +display those to users. Check out layers/enums.go for one example, as well as +layers/icmp.go for layer-specific enums. + +When naming things, try for descriptiveness over suscinctness. For example, +choose DNSResponseRecord over DNSRR. diff --git a/vendor/github.com/google/gopacket/LICENSE b/vendor/github.com/google/gopacket/LICENSE new file mode 100644 index 0000000000..2100d524d9 --- /dev/null +++ b/vendor/github.com/google/gopacket/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 Google, Inc. All rights reserved. +Copyright (c) 2009-2011 Andreas Krennmair. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Andreas Krennmair, Google, nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/gopacket/README.md b/vendor/github.com/google/gopacket/README.md new file mode 100644 index 0000000000..efe462ee10 --- /dev/null +++ b/vendor/github.com/google/gopacket/README.md @@ -0,0 +1,12 @@ +# GoPacket + +This library provides packet decoding capabilities for Go. +See [godoc](https://godoc.org/github.com/google/gopacket) for more details. + +[![Build Status](https://travis-ci.org/google/gopacket.svg?branch=master)](https://travis-ci.org/google/gopacket) +[![GoDoc](https://godoc.org/github.com/google/gopacket?status.svg)](https://godoc.org/github.com/google/gopacket) + +Minimum Go version required is 1.5 except for pcapgo/EthernetHandle, afpacket, and bsdbpf which need at least 1.9 due to x/sys/unix dependencies. + +Originally forked from the gopcap project written by Andreas +Krennmair (http://github.com/akrennmair/gopcap). diff --git a/vendor/github.com/google/gopacket/base.go b/vendor/github.com/google/gopacket/base.go new file mode 100644 index 0000000000..91e150c215 --- /dev/null +++ b/vendor/github.com/google/gopacket/base.go @@ -0,0 +1,178 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package gopacket + +import ( + "fmt" +) + +// Layer represents a single decoded packet layer (using either the +// OSI or TCP/IP definition of a layer). When decoding, a packet's data is +// broken up into a number of layers. The caller may call LayerType() to +// figure out which type of layer they've received from the packet. Optionally, +// they may then use a type assertion to get the actual layer type for deep +// inspection of the data. +type Layer interface { + // LayerType is the gopacket type for this layer. + LayerType() LayerType + // LayerContents returns the set of bytes that make up this layer. + LayerContents() []byte + // LayerPayload returns the set of bytes contained within this layer, not + // including the layer itself. 
+ LayerPayload() []byte +} + +// Payload is a Layer containing the payload of a packet. The definition of +// what constitutes the payload of a packet depends on previous layers; for +// TCP and UDP, we stop decoding above layer 4 and return the remaining +// bytes as a Payload. Payload is an ApplicationLayer. +type Payload []byte + +// LayerType returns LayerTypePayload +func (p Payload) LayerType() LayerType { return LayerTypePayload } + +// LayerContents returns the bytes making up this layer. +func (p Payload) LayerContents() []byte { return []byte(p) } + +// LayerPayload returns the payload within this layer. +func (p Payload) LayerPayload() []byte { return nil } + +// Payload returns this layer as bytes. +func (p Payload) Payload() []byte { return []byte(p) } + +// String implements fmt.Stringer. +func (p Payload) String() string { return fmt.Sprintf("%d byte(s)", len(p)) } + +// GoString implements fmt.GoStringer. +func (p Payload) GoString() string { return LongBytesGoString([]byte(p)) } + +// CanDecode implements DecodingLayer. +func (p Payload) CanDecode() LayerClass { return LayerTypePayload } + +// NextLayerType implements DecodingLayer. +func (p Payload) NextLayerType() LayerType { return LayerTypeZero } + +// DecodeFromBytes implements DecodingLayer. +func (p *Payload) DecodeFromBytes(data []byte, df DecodeFeedback) error { + *p = Payload(data) + return nil +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (p Payload) SerializeTo(b SerializeBuffer, opts SerializeOptions) error { + bytes, err := b.PrependBytes(len(p)) + if err != nil { + return err + } + copy(bytes, p) + return nil +} + +// decodePayload decodes data by returning it all in a Payload layer. +func decodePayload(data []byte, p PacketBuilder) error { + payload := &Payload{} + if err := payload.DecodeFromBytes(data, p); err != nil { + return err + } + p.AddLayer(payload) + p.SetApplicationLayer(payload) + return nil +} + +// Fragment is a Layer containing a fragment of a larger frame, used by layers +// like IPv4 and IPv6 that allow for fragmentation of their payloads. +type Fragment []byte + +// LayerType returns LayerTypeFragment +func (p *Fragment) LayerType() LayerType { return LayerTypeFragment } + +// LayerContents implements Layer. +func (p *Fragment) LayerContents() []byte { return []byte(*p) } + +// LayerPayload implements Layer. +func (p *Fragment) LayerPayload() []byte { return nil } + +// Payload returns this layer as a byte slice. +func (p *Fragment) Payload() []byte { return []byte(*p) } + +// String implements fmt.Stringer. +func (p *Fragment) String() string { return fmt.Sprintf("%d byte(s)", len(*p)) } + +// CanDecode implements DecodingLayer. +func (p *Fragment) CanDecode() LayerClass { return LayerTypeFragment } + +// NextLayerType implements DecodingLayer. +func (p *Fragment) NextLayerType() LayerType { return LayerTypeZero } + +// DecodeFromBytes implements DecodingLayer. +func (p *Fragment) DecodeFromBytes(data []byte, df DecodeFeedback) error { + *p = Fragment(data) + return nil +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. 
+func (p *Fragment) SerializeTo(b SerializeBuffer, opts SerializeOptions) error { + bytes, err := b.PrependBytes(len(*p)) + if err != nil { + return err + } + copy(bytes, *p) + return nil +} + +// decodeFragment decodes data by returning it all in a Fragment layer. +func decodeFragment(data []byte, p PacketBuilder) error { + payload := &Fragment{} + if err := payload.DecodeFromBytes(data, p); err != nil { + return err + } + p.AddLayer(payload) + p.SetApplicationLayer(payload) + return nil +} + +// These layers correspond to Internet Protocol Suite (TCP/IP) layers, and their +// corresponding OSI layers, as best as possible. + +// LinkLayer is the packet layer corresponding to TCP/IP layer 1 (OSI layer 2) +type LinkLayer interface { + Layer + LinkFlow() Flow +} + +// NetworkLayer is the packet layer corresponding to TCP/IP layer 2 (OSI +// layer 3) +type NetworkLayer interface { + Layer + NetworkFlow() Flow +} + +// TransportLayer is the packet layer corresponding to the TCP/IP layer 3 (OSI +// layer 4) +type TransportLayer interface { + Layer + TransportFlow() Flow +} + +// ApplicationLayer is the packet layer corresponding to the TCP/IP layer 4 (OSI +// layer 7), also known as the packet payload. +type ApplicationLayer interface { + Layer + Payload() []byte +} + +// ErrorLayer is a packet layer created when decoding of the packet has failed. +// Its payload is all the bytes that we were unable to decode, and the returned +// error details why the decoding failed. +type ErrorLayer interface { + Layer + Error() error +} diff --git a/vendor/github.com/google/gopacket/decode.go b/vendor/github.com/google/gopacket/decode.go new file mode 100644 index 0000000000..2633f848ed --- /dev/null +++ b/vendor/github.com/google/gopacket/decode.go @@ -0,0 +1,157 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package gopacket + +import ( + "errors" +) + +// DecodeFeedback is used by DecodingLayer layers to provide decoding metadata. +type DecodeFeedback interface { + // SetTruncated should be called if during decoding you notice that a packet + // is shorter than internal layer variables (HeaderLength, or the like) say it + // should be. It sets packet.Metadata().Truncated. + SetTruncated() +} + +type nilDecodeFeedback struct{} + +func (nilDecodeFeedback) SetTruncated() {} + +// NilDecodeFeedback implements DecodeFeedback by doing nothing. +var NilDecodeFeedback DecodeFeedback = nilDecodeFeedback{} + +// PacketBuilder is used by layer decoders to store the layers they've decoded, +// and to defer future decoding via NextDecoder. +// Typically, the pattern for use is: +// func (m *myDecoder) Decode(data []byte, p PacketBuilder) error { +// if myLayer, err := myDecodingLogic(data); err != nil { +// return err +// } else { +// p.AddLayer(myLayer) +// } +// // maybe do this, if myLayer is a LinkLayer +// p.SetLinkLayer(myLayer) +// return p.NextDecoder(nextDecoder) +// } +type PacketBuilder interface { + DecodeFeedback + // AddLayer should be called by a decoder immediately upon successful + // decoding of a layer. + AddLayer(l Layer) + // The following functions set the various specific layers in the final + // packet. Note that if many layers call SetX, the first call is kept and all + // other calls are ignored. 
+ SetLinkLayer(LinkLayer) + SetNetworkLayer(NetworkLayer) + SetTransportLayer(TransportLayer) + SetApplicationLayer(ApplicationLayer) + SetErrorLayer(ErrorLayer) + // NextDecoder should be called by a decoder when they're done decoding a + // packet layer but not done with decoding the entire packet. The next + // decoder will be called to decode the last AddLayer's LayerPayload. + // Because of this, NextDecoder must only be called once all other + // PacketBuilder calls have been made. Set*Layer and AddLayer calls after + // NextDecoder calls will behave incorrectly. + NextDecoder(next Decoder) error + // DumpPacketData is used solely for decoding. If you come across an error + // you need to diagnose while processing a packet, call this and your packet's + // data will be dumped to stderr so you can create a test. This should never + // be called from a production decoder. + DumpPacketData() + // DecodeOptions returns the decode options + DecodeOptions() *DecodeOptions +} + +// Decoder is an interface for logic to decode a packet layer. Users may +// implement a Decoder to handle their own strange packet types, or may use one +// of the many decoders available in the 'layers' subpackage to decode things +// for them. +type Decoder interface { + // Decode decodes the bytes of a packet, sending decoded values and other + // information to PacketBuilder, and returning an error if unsuccessful. See + // the PacketBuilder documentation for more details. + Decode([]byte, PacketBuilder) error +} + +// DecodeFunc wraps a function to make it a Decoder. +type DecodeFunc func([]byte, PacketBuilder) error + +// Decode implements Decoder by calling itself. +func (d DecodeFunc) Decode(data []byte, p PacketBuilder) error { + // function, call thyself. + return d(data, p) +} + +// DecodePayload is a Decoder that returns a Payload layer containing all +// remaining bytes. +var DecodePayload Decoder = DecodeFunc(decodePayload) + +// DecodeUnknown is a Decoder that returns an Unknown layer containing all +// remaining bytes, useful if you run up against a layer that you're unable to +// decode yet. This layer is considered an ErrorLayer. +var DecodeUnknown Decoder = DecodeFunc(decodeUnknown) + +// DecodeFragment is a Decoder that returns a Fragment layer containing all +// remaining bytes. +var DecodeFragment Decoder = DecodeFunc(decodeFragment) + +// LayerTypeZero is an invalid layer type, but can be used to determine whether +// layer type has actually been set correctly. +var LayerTypeZero = RegisterLayerType(0, LayerTypeMetadata{Name: "Unknown", Decoder: DecodeUnknown}) + +// LayerTypeDecodeFailure is the layer type for the default error layer. +var LayerTypeDecodeFailure = RegisterLayerType(1, LayerTypeMetadata{Name: "DecodeFailure", Decoder: DecodeUnknown}) + +// LayerTypePayload is the layer type for a payload that we don't try to decode +// but treat as a success, IE: an application-level payload. +var LayerTypePayload = RegisterLayerType(2, LayerTypeMetadata{Name: "Payload", Decoder: DecodePayload}) + +// LayerTypeFragment is the layer type for a fragment of a layer transported +// by an underlying layer that supports fragmentation. +var LayerTypeFragment = RegisterLayerType(3, LayerTypeMetadata{Name: "Fragment", Decoder: DecodeFragment}) + +// DecodeFailure is a packet layer created if decoding of the packet data failed +// for some reason. It implements ErrorLayer. LayerContents will be the entire +// set of bytes that failed to parse, and Error will return the reason parsing +// failed. 
+type DecodeFailure struct { + data []byte + err error + stack []byte +} + +// Error returns the error encountered during decoding. +func (d *DecodeFailure) Error() error { return d.err } + +// LayerContents implements Layer. +func (d *DecodeFailure) LayerContents() []byte { return d.data } + +// LayerPayload implements Layer. +func (d *DecodeFailure) LayerPayload() []byte { return nil } + +// String implements fmt.Stringer. +func (d *DecodeFailure) String() string { + return "Packet decoding error: " + d.Error().Error() +} + +// Dump implements Dumper. +func (d *DecodeFailure) Dump() (s string) { + if d.stack != nil { + s = string(d.stack) + } + return +} + +// LayerType returns LayerTypeDecodeFailure +func (d *DecodeFailure) LayerType() LayerType { return LayerTypeDecodeFailure } + +// decodeUnknown "decodes" unsupported data types by returning an error. +// This decoder will thus always return a DecodeFailure layer. +func decodeUnknown(data []byte, p PacketBuilder) error { + return errors.New("Layer type not currently supported") +} diff --git a/vendor/github.com/google/gopacket/doc.go b/vendor/github.com/google/gopacket/doc.go new file mode 100644 index 0000000000..b46e43dfa5 --- /dev/null +++ b/vendor/github.com/google/gopacket/doc.go @@ -0,0 +1,432 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +/* +Package gopacket provides packet decoding for the Go language. + +gopacket contains many sub-packages with additional functionality you may find +useful, including: + + * layers: You'll probably use this every time. This contains of the logic + built into gopacket for decoding packet protocols. Note that all example + code below assumes that you have imported both gopacket and + gopacket/layers. + * pcap: C bindings to use libpcap to read packets off the wire. + * pfring: C bindings to use PF_RING to read packets off the wire. + * afpacket: C bindings for Linux's AF_PACKET to read packets off the wire. + * tcpassembly: TCP stream reassembly + +Also, if you're looking to dive right into code, see the examples subdirectory +for numerous simple binaries built using gopacket libraries. + +Minimum go version required is 1.5 except for pcapgo/EthernetHandle, afpacket, +and bsdbpf which need at least 1.7 due to x/sys/unix dependencies. + +Basic Usage + +gopacket takes in packet data as a []byte and decodes it into a packet with +a non-zero number of "layers". Each layer corresponds to a protocol +within the bytes. Once a packet has been decoded, the layers of the packet +can be requested from the packet. + + // Decode a packet + packet := gopacket.NewPacket(myPacketData, layers.LayerTypeEthernet, gopacket.Default) + // Get the TCP layer from this packet + if tcpLayer := packet.Layer(layers.LayerTypeTCP); tcpLayer != nil { + fmt.Println("This is a TCP packet!") + // Get actual TCP data from this layer + tcp, _ := tcpLayer.(*layers.TCP) + fmt.Printf("From src port %d to dst port %d\n", tcp.SrcPort, tcp.DstPort) + } + // Iterate over all layers, printing out each layer type + for _, layer := range packet.Layers() { + fmt.Println("PACKET LAYER:", layer.LayerType()) + } + +Packets can be decoded from a number of starting points. Many of our base +types implement Decoder, which allow us to decode packets for which +we don't have full data. 
+ + // Decode an ethernet packet + ethP := gopacket.NewPacket(p1, layers.LayerTypeEthernet, gopacket.Default) + // Decode an IPv6 header and everything it contains + ipP := gopacket.NewPacket(p2, layers.LayerTypeIPv6, gopacket.Default) + // Decode a TCP header and its payload + tcpP := gopacket.NewPacket(p3, layers.LayerTypeTCP, gopacket.Default) + + +Reading Packets From A Source + +Most of the time, you won't just have a []byte of packet data lying around. +Instead, you'll want to read packets in from somewhere (file, interface, etc) +and process them. To do that, you'll want to build a PacketSource. + +First, you'll need to construct an object that implements the PacketDataSource +interface. There are implementations of this interface bundled with gopacket +in the gopacket/pcap and gopacket/pfring subpackages... see their documentation +for more information on their usage. Once you have a PacketDataSource, you can +pass it into NewPacketSource, along with a Decoder of your choice, to create +a PacketSource. + +Once you have a PacketSource, you can read packets from it in multiple ways. +See the docs for PacketSource for more details. The easiest method is the +Packets function, which returns a channel, then asynchronously writes new +packets into that channel, closing the channel if the packetSource hits an +end-of-file. + + packetSource := ... // construct using pcap or pfring + for packet := range packetSource.Packets() { + handlePacket(packet) // do something with each packet + } + +You can change the decoding options of the packetSource by setting fields in +packetSource.DecodeOptions... see the following sections for more details. + + +Lazy Decoding + +gopacket optionally decodes packet data lazily, meaning it +only decodes a packet layer when it needs to handle a function call. + + // Create a packet, but don't actually decode anything yet + packet := gopacket.NewPacket(myPacketData, layers.LayerTypeEthernet, gopacket.Lazy) + // Now, decode the packet up to the first IPv4 layer found but no further. + // If no IPv4 layer was found, the whole packet will be decoded looking for + // it. + ip4 := packet.Layer(layers.LayerTypeIPv4) + // Decode all layers and return them. The layers up to the first IPv4 layer + // are already decoded, and will not require decoding a second time. + layers := packet.Layers() + +Lazily-decoded packets are not concurrency-safe. Since layers have not all been +decoded, each call to Layer() or Layers() has the potential to mutate the packet +in order to decode the next layer. If a packet is used +in multiple goroutines concurrently, don't use gopacket.Lazy. Then gopacket +will decode the packet fully, and all future function calls won't mutate the +object. + + +NoCopy Decoding + +By default, gopacket will copy the slice passed to NewPacket and store the +copy within the packet, so future mutations to the bytes underlying the slice +don't affect the packet and its layers. If you can guarantee that the +underlying slice bytes won't be changed, you can use NoCopy to tell +gopacket.NewPacket, and it'll use the passed-in slice itself. + + // This channel returns new byte slices, each of which points to a new + // memory location that's guaranteed immutable for the duration of the + // packet. 
+ for data := range myByteSliceChannel { + p := gopacket.NewPacket(data, layers.LayerTypeEthernet, gopacket.NoCopy) + doSomethingWithPacket(p) + } + +The fastest method of decoding is to use both Lazy and NoCopy, but note from +the many caveats above that for some implementations either or both may be +dangerous. + + +Pointers To Known Layers + +During decoding, certain layers are stored in the packet as well-known +layer types. For example, IPv4 and IPv6 are both considered NetworkLayer +layers, while TCP and UDP are both TransportLayer layers. We support 4 +layers, corresponding to the 4 layers of the TCP/IP layering scheme (roughly +anagalous to layers 2, 3, 4, and 7 of the OSI model). To access these, +you can use the packet.LinkLayer, packet.NetworkLayer, +packet.TransportLayer, and packet.ApplicationLayer functions. Each of +these functions returns a corresponding interface +(gopacket.{Link,Network,Transport,Application}Layer). The first three +provide methods for getting src/dst addresses for that particular layer, +while the final layer provides a Payload function to get payload data. +This is helpful, for example, to get payloads for all packets regardless +of their underlying data type: + + // Get packets from some source + for packet := range someSource { + if app := packet.ApplicationLayer(); app != nil { + if strings.Contains(string(app.Payload()), "magic string") { + fmt.Println("Found magic string in a packet!") + } + } + } + +A particularly useful layer is ErrorLayer, which is set whenever there's +an error parsing part of the packet. + + packet := gopacket.NewPacket(myPacketData, layers.LayerTypeEthernet, gopacket.Default) + if err := packet.ErrorLayer(); err != nil { + fmt.Println("Error decoding some part of the packet:", err) + } + +Note that we don't return an error from NewPacket because we may have decoded +a number of layers successfully before running into our erroneous layer. You +may still be able to get your Ethernet and IPv4 layers correctly, even if +your TCP layer is malformed. + + +Flow And Endpoint + +gopacket has two useful objects, Flow and Endpoint, for communicating in a protocol +independent manner the fact that a packet is coming from A and going to B. +The general layer types LinkLayer, NetworkLayer, and TransportLayer all provide +methods for extracting their flow information, without worrying about the type +of the underlying Layer. + +A Flow is a simple object made up of a set of two Endpoints, one source and one +destination. It details the sender and receiver of the Layer of the Packet. + +An Endpoint is a hashable representation of a source or destination. For +example, for LayerTypeIPv4, an Endpoint contains the IP address bytes for a v4 +IP packet. A Flow can be broken into Endpoints, and Endpoints can be combined +into Flows: + + packet := gopacket.NewPacket(myPacketData, layers.LayerTypeEthernet, gopacket.Lazy) + netFlow := packet.NetworkLayer().NetworkFlow() + src, dst := netFlow.Endpoints() + reverseFlow := gopacket.NewFlow(dst, src) + +Both Endpoint and Flow objects can be used as map keys, and the equality +operator can compare them, so you can easily group together all packets +based on endpoint criteria: + + flows := map[gopacket.Endpoint]chan gopacket.Packet + packet := gopacket.NewPacket(myPacketData, layers.LayerTypeEthernet, gopacket.Lazy) + // Send all TCP packets to channels based on their destination port. 
+ if tcp := packet.Layer(layers.LayerTypeTCP); tcp != nil { + flows[tcp.TransportFlow().Dst()] <- packet + } + // Look for all packets with the same source and destination network address + if net := packet.NetworkLayer(); net != nil { + src, dst := net.NetworkFlow().Endpoints() + if src == dst { + fmt.Println("Fishy packet has same network source and dst: %s", src) + } + } + // Find all packets coming from UDP port 1000 to UDP port 500 + interestingFlow := gopacket.FlowFromEndpoints(layers.NewUDPPortEndpoint(1000), layers.NewUDPPortEndpoint(500)) + if t := packet.NetworkLayer(); t != nil && t.TransportFlow() == interestingFlow { + fmt.Println("Found that UDP flow I was looking for!") + } + +For load-balancing purposes, both Flow and Endpoint have FastHash() functions, +which provide quick, non-cryptographic hashes of their contents. Of particular +importance is the fact that Flow FastHash() is symmetric: A->B will have the same +hash as B->A. An example usage could be: + + channels := [8]chan gopacket.Packet + for i := 0; i < 8; i++ { + channels[i] = make(chan gopacket.Packet) + go packetHandler(channels[i]) + } + for packet := range getPackets() { + if net := packet.NetworkLayer(); net != nil { + channels[int(net.NetworkFlow().FastHash()) & 0x7] <- packet + } + } + +This allows us to split up a packet stream while still making sure that each +stream sees all packets for a flow (and its bidirectional opposite). + + +Implementing Your Own Decoder + +If your network has some strange encapsulation, you can implement your own +decoder. In this example, we handle Ethernet packets which are encapsulated +in a 4-byte header. + + // Create a layer type, should be unique and high, so it doesn't conflict, + // giving it a name and a decoder to use. + var MyLayerType = gopacket.RegisterLayerType(12345, gopacket.LayerTypeMetadata{Name: "MyLayerType", Decoder: gopacket.DecodeFunc(decodeMyLayer)}) + + // Implement my layer + type MyLayer struct { + StrangeHeader []byte + payload []byte + } + func (m MyLayer) LayerType() gopacket.LayerType { return MyLayerType } + func (m MyLayer) LayerContents() []byte { return m.StrangeHeader } + func (m MyLayer) LayerPayload() []byte { return m.payload } + + // Now implement a decoder... this one strips off the first 4 bytes of the + // packet. + func decodeMyLayer(data []byte, p gopacket.PacketBuilder) error { + // Create my layer + p.AddLayer(&MyLayer{data[:4], data[4:]}) + // Determine how to handle the rest of the packet + return p.NextDecoder(layers.LayerTypeEthernet) + } + + // Finally, decode your packets: + p := gopacket.NewPacket(data, MyLayerType, gopacket.Lazy) + +See the docs for Decoder and PacketBuilder for more details on how coding +decoders works, or look at RegisterLayerType and RegisterEndpointType to see how +to add layer/endpoint types to gopacket. + + +Fast Decoding With DecodingLayerParser + +TLDR: DecodingLayerParser takes about 10% of the time as NewPacket to decode +packet data, but only for known packet stacks. + +Basic decoding using gopacket.NewPacket or PacketSource.Packets is somewhat slow +due to its need to allocate a new packet and every respective layer. It's very +versatile and can handle all known layer types, but sometimes you really only +care about a specific set of layers regardless, so that versatility is wasted. + +DecodingLayerParser avoids memory allocation altogether by decoding packet +layers directly into preallocated objects, which you can then reference to get +the packet's information. 
A quick example: + + func main() { + var eth layers.Ethernet + var ip4 layers.IPv4 + var ip6 layers.IPv6 + var tcp layers.TCP + parser := gopacket.NewDecodingLayerParser(layers.LayerTypeEthernet, ð, &ip4, &ip6, &tcp) + decoded := []gopacket.LayerType{} + for packetData := range somehowGetPacketData() { + if err := parser.DecodeLayers(packetData, &decoded); err != nil { + fmt.Fprintf(os.Stderr, "Could not decode layers: %v\n", err) + continue + } + for _, layerType := range decoded { + switch layerType { + case layers.LayerTypeIPv6: + fmt.Println(" IP6 ", ip6.SrcIP, ip6.DstIP) + case layers.LayerTypeIPv4: + fmt.Println(" IP4 ", ip4.SrcIP, ip4.DstIP) + } + } + } + } + +The important thing to note here is that the parser is modifying the passed in +layers (eth, ip4, ip6, tcp) instead of allocating new ones, thus greatly +speeding up the decoding process. It's even branching based on layer type... +it'll handle an (eth, ip4, tcp) or (eth, ip6, tcp) stack. However, it won't +handle any other type... since no other decoders were passed in, an (eth, ip4, +udp) stack will stop decoding after ip4, and only pass back [LayerTypeEthernet, +LayerTypeIPv4] through the 'decoded' slice (along with an error saying it can't +decode a UDP packet). + +Unfortunately, not all layers can be used by DecodingLayerParser... only those +implementing the DecodingLayer interface are usable. Also, it's possible to +create DecodingLayers that are not themselves Layers... see +layers.IPv6ExtensionSkipper for an example of this. + +Faster And Customized Decoding with DecodingLayerContainer + +By default, DecodingLayerParser uses native map to store and search for a layer +to decode. Though being versatile, in some cases this solution may be not so +optimal. For example, if you have only few layers faster operations may be +provided by sparse array indexing or linear array scan. + +To accomodate these scenarios, DecodingLayerContainer interface is introduced +along with its implementations: DecodingLayerSparse, DecodingLayerArray and +DecodingLayerMap. You can specify a container implementation to +DecodingLayerParser with SetDecodingLayerContainer method. Example: + + dlp := gopacket.NewDecodingLayerParser(LayerTypeEthernet) + dlp.SetDecodingLayerContainer(gopacket.DecodingLayerSparse(nil)) + var eth layers.Ethernet + dlp.AddDecodingLayer(ð) + // ... add layers and use DecodingLayerParser as usual... + +To skip one level of indirection (though sacrificing some capabilities) you may +also use DecodingLayerContainer as a decoding tool as it is. In this case you have to +handle unknown layer types and layer panics by yourself. 
Example: + + func main() { + var eth layers.Ethernet + var ip4 layers.IPv4 + var ip6 layers.IPv6 + var tcp layers.TCP + dlc := gopacket.DecodingLayerContainer(gopacket.DecodingLayerArray(nil)) + dlc = dlc.Put(ð) + dlc = dlc.Put(&ip4) + dlc = dlc.Put(&ip6) + dlc = dlc.Put(&tcp) + // you may specify some meaningful DecodeFeedback + decoder := dlc.LayersDecoder(LayerTypeEthernet, gopacket.NilDecodeFeedback) + decoded := make([]gopacket.LayerType, 0, 20) + for packetData := range somehowGetPacketData() { + lt, err := decoder(packetData, &decoded) + if err != nil { + fmt.Fprintf(os.Stderr, "Could not decode layers: %v\n", err) + continue + } + if lt != gopacket.LayerTypeZero { + fmt.Fprintf(os.Stderr, "unknown layer type: %v\n", lt) + continue + } + for _, layerType := range decoded { + // examine decoded layertypes just as already shown above + } + } + } + +DecodingLayerSparse is the fastest but most effective when LayerType values +that layers in use can decode are not large because otherwise that would lead +to bigger memory footprint. DecodingLayerArray is very compact and primarily +usable if the number of decoding layers is not big (up to ~10-15, but please do +your own benchmarks). DecodingLayerMap is the most versatile one and used by +DecodingLayerParser by default. Please refer to tests and benchmarks in layers +subpackage to further examine usage examples and performance measurements. + +You may also choose to implement your own DecodingLayerContainer if you want to +make use of your own internal packet decoding logic. + +Creating Packet Data + +As well as offering the ability to decode packet data, gopacket will allow you +to create packets from scratch, as well. A number of gopacket layers implement +the SerializableLayer interface; these layers can be serialized to a []byte in +the following manner: + + ip := &layers.IPv4{ + SrcIP: net.IP{1, 2, 3, 4}, + DstIP: net.IP{5, 6, 7, 8}, + // etc... + } + buf := gopacket.NewSerializeBuffer() + opts := gopacket.SerializeOptions{} // See SerializeOptions for more details. + err := ip.SerializeTo(buf, opts) + if err != nil { panic(err) } + fmt.Println(buf.Bytes()) // prints out a byte slice containing the serialized IPv4 layer. + +SerializeTo PREPENDS the given layer onto the SerializeBuffer, and they treat +the current buffer's Bytes() slice as the payload of the serializing layer. +Therefore, you can serialize an entire packet by serializing a set of layers in +reverse order (Payload, then TCP, then IP, then Ethernet, for example). The +SerializeBuffer's SerializeLayers function is a helper that does exactly that. + +To generate a (empty and useless, because no fields are set) +Ethernet(IPv4(TCP(Payload))) packet, for example, you can run: + + buf := gopacket.NewSerializeBuffer() + opts := gopacket.SerializeOptions{} + gopacket.SerializeLayers(buf, opts, + &layers.Ethernet{}, + &layers.IPv4{}, + &layers.TCP{}, + gopacket.Payload([]byte{1, 2, 3, 4})) + packetData := buf.Bytes() + +A Final Note + +If you use gopacket, you'll almost definitely want to make sure gopacket/layers +is imported, since when imported it sets all the LayerType variables and fills +in a lot of interesting variables/maps (DecodersByLayerName, etc). 
Therefore, +it's recommended that even if you don't use any layers functions directly, you still import with: + + import ( + _ "github.com/google/gopacket/layers" + ) +*/ +package gopacket diff --git a/vendor/github.com/google/gopacket/flows.go b/vendor/github.com/google/gopacket/flows.go new file mode 100644 index 0000000000..a00c88398e --- /dev/null +++ b/vendor/github.com/google/gopacket/flows.go @@ -0,0 +1,236 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package gopacket + +import ( + "bytes" + "fmt" + "strconv" +) + +// MaxEndpointSize determines the maximum size in bytes of an endpoint address. +// +// Endpoints/Flows have a problem: They need to be hashable. Therefore, they +// can't use a byte slice. The two obvious choices are to use a string or a +// byte array. Strings work great, but string creation requires memory +// allocation, which can be slow. Arrays work great, but have a fixed size. We +// originally used the former, now we've switched to the latter. Use of a fixed +// byte-array doubles the speed of constructing a flow (due to not needing to +// allocate). This is a huge increase... too much for us to pass up. +// +// The end result of this, though, is that an endpoint/flow can't be created +// using more than MaxEndpointSize bytes per address. +const MaxEndpointSize = 16 + +// Endpoint is the set of bytes used to address packets at various layers. +// See LinkLayer, NetworkLayer, and TransportLayer specifications. +// Endpoints are usable as map keys. +type Endpoint struct { + typ EndpointType + len int + raw [MaxEndpointSize]byte +} + +// EndpointType returns the endpoint type associated with this endpoint. +func (a Endpoint) EndpointType() EndpointType { return a.typ } + +// Raw returns the raw bytes of this endpoint. These aren't human-readable +// most of the time, but they are faster than calling String. +func (a Endpoint) Raw() []byte { return a.raw[:a.len] } + +// LessThan provides a stable ordering for all endpoints. It sorts first based +// on the EndpointType of an endpoint, then based on the raw bytes of that +// endpoint. +// +// For some endpoints, the actual comparison may not make sense, however this +// ordering does provide useful information for most Endpoint types. +// Ordering is based first on endpoint type, then on raw endpoint bytes. +// Endpoint bytes are sorted lexicographically. +func (a Endpoint) LessThan(b Endpoint) bool { + return a.typ < b.typ || (a.typ == b.typ && bytes.Compare(a.raw[:a.len], b.raw[:b.len]) < 0) +} + +// fnvHash is used by our FastHash functions, and implements the FNV hash +// created by Glenn Fowler, Landon Curt Noll, and Phong Vo. +// See http://isthe.com/chongo/tech/comp/fnv/. +func fnvHash(s []byte) (h uint64) { + h = fnvBasis + for i := 0; i < len(s); i++ { + h ^= uint64(s[i]) + h *= fnvPrime + } + return +} + +const fnvBasis = 14695981039346656037 +const fnvPrime = 1099511628211 + +// FastHash provides a quick hashing function for an endpoint, useful if you'd +// like to split up endpoints by modulos or other load-balancing techniques. +// It uses a variant of Fowler-Noll-Vo hashing. +// +// The output of FastHash is not guaranteed to remain the same through future +// code revisions, so should not be used to key values in persistent storage. 
+func (a Endpoint) FastHash() (h uint64) { + h = fnvHash(a.raw[:a.len]) + h ^= uint64(a.typ) + h *= fnvPrime + return +} + +// NewEndpoint creates a new Endpoint object. +// +// The size of raw must be less than MaxEndpointSize, otherwise this function +// will panic. +func NewEndpoint(typ EndpointType, raw []byte) (e Endpoint) { + e.len = len(raw) + if e.len > MaxEndpointSize { + panic("raw byte length greater than MaxEndpointSize") + } + e.typ = typ + copy(e.raw[:], raw) + return +} + +// EndpointTypeMetadata is used to register a new endpoint type. +type EndpointTypeMetadata struct { + // Name is the string returned by an EndpointType's String function. + Name string + // Formatter is called from an Endpoint's String function to format the raw + // bytes in an Endpoint into a human-readable string. + Formatter func([]byte) string +} + +// EndpointType is the type of a gopacket Endpoint. This type determines how +// the bytes stored in the endpoint should be interpreted. +type EndpointType int64 + +var endpointTypes = map[EndpointType]EndpointTypeMetadata{} + +// RegisterEndpointType creates a new EndpointType and registers it globally. +// It MUST be passed a unique number, or it will panic. Numbers 0-999 are +// reserved for gopacket's use. +func RegisterEndpointType(num int, meta EndpointTypeMetadata) EndpointType { + t := EndpointType(num) + if _, ok := endpointTypes[t]; ok { + panic("Endpoint type number already in use") + } + endpointTypes[t] = meta + return t +} + +func (e EndpointType) String() string { + if t, ok := endpointTypes[e]; ok { + return t.Name + } + return strconv.Itoa(int(e)) +} + +func (a Endpoint) String() string { + if t, ok := endpointTypes[a.typ]; ok && t.Formatter != nil { + return t.Formatter(a.raw[:a.len]) + } + return fmt.Sprintf("%v:%v", a.typ, a.raw) +} + +// Flow represents the direction of traffic for a packet layer, as a source and destination Endpoint. +// Flows are usable as map keys. +type Flow struct { + typ EndpointType + slen, dlen int + src, dst [MaxEndpointSize]byte +} + +// FlowFromEndpoints creates a new flow by pasting together two endpoints. +// The endpoints must have the same EndpointType, or this function will return +// an error. +func FlowFromEndpoints(src, dst Endpoint) (_ Flow, err error) { + if src.typ != dst.typ { + err = fmt.Errorf("Mismatched endpoint types: %v->%v", src.typ, dst.typ) + return + } + return Flow{src.typ, src.len, dst.len, src.raw, dst.raw}, nil +} + +// FastHash provides a quick hashing function for a flow, useful if you'd +// like to split up flows by modulos or other load-balancing techniques. +// It uses a variant of Fowler-Noll-Vo hashing, and is guaranteed to collide +// with its reverse flow. IE: the flow A->B will have the same hash as the flow +// B->A. +// +// The output of FastHash is not guaranteed to remain the same through future +// code revisions, so should not be used to key values in persistent storage. +func (f Flow) FastHash() (h uint64) { + // This combination must be commutative. We don't use ^, since that would + // give the same hash for all A->A flows. + h = fnvHash(f.src[:f.slen]) + fnvHash(f.dst[:f.dlen]) + h ^= uint64(f.typ) + h *= fnvPrime + return +} + +// String returns a human-readable representation of this flow, in the form +// "Src->Dst" +func (f Flow) String() string { + s, d := f.Endpoints() + return fmt.Sprintf("%v->%v", s, d) +} + +// EndpointType returns the EndpointType for this Flow. 
+func (f Flow) EndpointType() EndpointType { + return f.typ +} + +// Endpoints returns the two Endpoints for this flow. +func (f Flow) Endpoints() (src, dst Endpoint) { + return Endpoint{f.typ, f.slen, f.src}, Endpoint{f.typ, f.dlen, f.dst} +} + +// Src returns the source Endpoint for this flow. +func (f Flow) Src() (src Endpoint) { + src, _ = f.Endpoints() + return +} + +// Dst returns the destination Endpoint for this flow. +func (f Flow) Dst() (dst Endpoint) { + _, dst = f.Endpoints() + return +} + +// Reverse returns a new flow with endpoints reversed. +func (f Flow) Reverse() Flow { + return Flow{f.typ, f.dlen, f.slen, f.dst, f.src} +} + +// NewFlow creates a new flow. +// +// src and dst must have length <= MaxEndpointSize, otherwise NewFlow will +// panic. +func NewFlow(t EndpointType, src, dst []byte) (f Flow) { + f.slen = len(src) + f.dlen = len(dst) + if f.slen > MaxEndpointSize || f.dlen > MaxEndpointSize { + panic("flow raw byte length greater than MaxEndpointSize") + } + f.typ = t + copy(f.src[:], src) + copy(f.dst[:], dst) + return +} + +// EndpointInvalid is an endpoint type used for invalid endpoints, IE endpoints +// that are specified incorrectly during creation. +var EndpointInvalid = RegisterEndpointType(0, EndpointTypeMetadata{Name: "invalid", Formatter: func(b []byte) string { + return fmt.Sprintf("%v", b) +}}) + +// InvalidEndpoint is a singleton Endpoint of type EndpointInvalid. +var InvalidEndpoint = NewEndpoint(EndpointInvalid, nil) + +// InvalidFlow is a singleton Flow of type EndpointInvalid. +var InvalidFlow = NewFlow(EndpointInvalid, nil, nil) diff --git a/vendor/github.com/google/gopacket/gc b/vendor/github.com/google/gopacket/gc new file mode 100644 index 0000000000..b1d8d2e1f6 --- /dev/null +++ b/vendor/github.com/google/gopacket/gc @@ -0,0 +1,288 @@ +#!/bin/bash +# Copyright 2012 Google, Inc. All rights reserved. + +# This script provides a simple way to run benchmarks against previous code and +# keep a log of how benchmarks change over time. When used with the --benchmark +# flag, it runs benchmarks from the current code and from the last commit run +# with --benchmark, then stores the results in the git commit description. We +# rerun the old benchmarks along with the new ones, since there's no guarantee +# that git commits will happen on the same machine, so machine differences could +# cause wildly inaccurate results. +# +# If you're making changes to 'gopacket' which could cause performance changes, +# you may be requested to use this commit script to make sure your changes don't +# have large detrimental effects (or to show off how awesome your performance +# improvements are). +# +# If not run with the --benchmark flag, this script is still very useful... it +# makes sure all the correct go formatting, building, and testing work as +# expected. + +function Usage { + cat < + +--benchmark: Run benchmark comparisons against last benchmark'd commit +--root: Run tests that require root priviledges +--gen: Generate code for MACs/ports by pulling down external data + +Note, some 'git commit' flags are necessary, if all else fails, pass in -a +EOF + exit 1 +} + +BENCH="" +GEN="" +ROOT="" +while [ ! -z "$1" ]; do + case "$1" in + "--benchmark") + BENCH="$2" + shift + shift + ;; + "--gen") + GEN="yes" + shift + ;; + "--root") + ROOT="yes" + shift + ;; + "--help") + Usage + ;; + "-h") + Usage + ;; + "help") + Usage + ;; + *) + break + ;; + esac +done + +function Root { + if [ ! 
-z "$ROOT" ]; then + local exec="$1" + # Some folks (like me) keep source code in places inaccessible by root (like + # NFS), so to make sure things run smoothly we copy them to a /tmp location. + local tmpfile="$(mktemp -t gopacket_XXXXXXXX)" + echo "Running root test executable $exec as $tmpfile" + cp "$exec" "$tmpfile" + chmod a+x "$tmpfile" + shift + sudo "$tmpfile" "$@" + fi +} + +if [ "$#" -eq "0" ]; then + Usage +fi + +cd $(dirname $0) + +# Check for copyright notices. +for filename in $(find ./ -type f -name '*.go'); do + if ! head -n 1 "$filename" | grep -q Copyright; then + echo "File '$filename' may not have copyright notice" + exit 1 + fi +done + +set -e +set -x + +if [ ! -z "$ROOT" ]; then + echo "Running SUDO to get root priviledges for root tests" + sudo echo "have root" +fi + +if [ ! -z "$GEN" ]; then + pushd macs + go run gen.go | gofmt > valid_mac_prefixes.go + popd + pushd layers + go run gen.go | gofmt > iana_ports.go + go run gen2.go | gofmt > enums_generated.go + popd +fi + +# Make sure everything is formatted, compiles, and tests pass. +go fmt ./... +go test -i ./... 2>/dev/null >/dev/null || true +go test +go build +pushd examples/bytediff +go build +popd +if [ -f /usr/include/pcap.h ]; then + pushd pcap + go test ./... + go build ./... + go build pcap_tester.go + Root pcap_tester --mode=basic + Root pcap_tester --mode=filtered + Root pcap_tester --mode=timestamp || echo "You might not support timestamp sources" + popd + pushd examples/afpacket + go build + popd + pushd examples/pcapdump + go build + popd + pushd examples/arpscan + go build + popd + pushd examples/bidirectional + go build + popd + pushd examples/synscan + go build + popd + pushd examples/httpassembly + go build + popd + pushd examples/statsassembly + go build + popd +fi +pushd macs +go test ./... +gofmt -w gen.go +go build gen.go +popd +pushd tcpassembly +go test ./... +popd +pushd reassembly +go test ./... +popd +pushd layers +gofmt -w gen.go +go build gen.go +go test ./... +popd +pushd pcapgo +go test ./... +go build ./... +popd +if [ -f /usr/include/linux/if_packet.h ]; then + if grep -q TPACKET_V3 /usr/include/linux/if_packet.h; then + pushd afpacket + go build ./... + go test ./... + popd + fi +fi +if [ -f /usr/include/pfring.h ]; then + pushd pfring + go test ./... + go build ./... + popd + pushd examples/pfdump + go build + popd +fi +pushd ip4defrag +go test ./... +popd +pushd defrag +go test ./... +popd + +for travis_script in `ls .travis.*.sh`; do + ./$travis_script +done + +# Run our initial commit +git commit "$@" + +if [ -z "$BENCH" ]; then + set +x + echo "We're not benchmarking and we've committed... we're done!" + exit +fi + +### If we get here, we want to run benchmarks from current commit, and compare +### then to benchmarks from the last --benchmark commit. + +# Get our current branch. +BRANCH="$(git branch | grep '^*' | awk '{print $2}')" + +# File we're going to build our commit description in. +COMMIT_FILE="$(mktemp /tmp/tmp.XXXXXXXX)" + +# Add the word "BENCH" to the start of the git commit. +echo -n "BENCH " > $COMMIT_FILE + +# Get the current description... there must be an easier way. 
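The --benchmark path ultimately just runs `go test --test.bench=<regexp>` for the new code and for the previously benchmarked commit, and pastes both outputs into the commit message. Any standard Go benchmark is picked up; a hypothetical micro-benchmark exercising the Flow hashing from flow.go, for example:

```go
package gopacket_test // illustrative benchmark, not part of the vendored code

import (
	"testing"

	"github.com/google/gopacket"
)

var sink uint64 // keeps the compiler from discarding the hash

// BenchmarkFlowFastHash would be matched by a run such as
// `./gc --benchmark FastHash -a` (flag usage as documented above).
func BenchmarkFlowFastHash(b *testing.B) {
	f := gopacket.NewFlow(gopacket.EndpointInvalid,
		[]byte{1, 2, 3, 4}, []byte{5, 6, 7, 8})
	for i := 0; i < b.N; i++ {
		sink = f.FastHash()
	}
}
```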
+git log -n 1 | grep '^ ' | sed 's/^ //' >> $COMMIT_FILE + +# Get the commit sha for the last benchmark commit +PREV=$(git log -n 1 --grep='BENCHMARK_MARKER_DO_NOT_CHANGE' | head -n 1 | awk '{print $2}') + +## Run current benchmarks + +cat >> $COMMIT_FILE <&1 | tee -a $COMMIT_FILE +pushd layers +go test --test.bench="$BENCH" 2>&1 | tee -a $COMMIT_FILE +popd +cat >> $COMMIT_FILE <&1 | tee -a $COMMIT_FILE +fi + + + +## Reset to last benchmark commit, run benchmarks + +git checkout $PREV + +cat >> $COMMIT_FILE <&1 | tee -a $COMMIT_FILE +pushd layers +go test --test.bench="$BENCH" 2>&1 | tee -a $COMMIT_FILE +popd +cat >> $COMMIT_FILE <&1 | tee -a $COMMIT_FILE +fi + + + +## Reset back to the most recent commit, edit the commit message by appending +## benchmark results. +git checkout $BRANCH +git commit --amend -F $COMMIT_FILE diff --git a/vendor/github.com/google/gopacket/layerclass.go b/vendor/github.com/google/gopacket/layerclass.go new file mode 100644 index 0000000000..775cd09877 --- /dev/null +++ b/vendor/github.com/google/gopacket/layerclass.go @@ -0,0 +1,107 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package gopacket + +// LayerClass is a set of LayerTypes, used for grabbing one of a number of +// different types from a packet. +type LayerClass interface { + // Contains returns true if the given layer type should be considered part + // of this layer class. + Contains(LayerType) bool + // LayerTypes returns the set of all layer types in this layer class. + // Note that this may not be a fast operation on all LayerClass + // implementations. + LayerTypes() []LayerType +} + +// Contains implements LayerClass. +func (l LayerType) Contains(a LayerType) bool { + return l == a +} + +// LayerTypes implements LayerClass. +func (l LayerType) LayerTypes() []LayerType { + return []LayerType{l} +} + +// LayerClassSlice implements a LayerClass with a slice. +type LayerClassSlice []bool + +// Contains returns true if the given layer type should be considered part +// of this layer class. +func (s LayerClassSlice) Contains(t LayerType) bool { + return int(t) < len(s) && s[t] +} + +// LayerTypes returns all layer types in this LayerClassSlice. +// Because of LayerClassSlice's implementation, this could be quite slow. +func (s LayerClassSlice) LayerTypes() (all []LayerType) { + for i := 0; i < len(s); i++ { + if s[i] { + all = append(all, LayerType(i)) + } + } + return +} + +// NewLayerClassSlice creates a new LayerClassSlice by creating a slice of +// size max(types) and setting slice[t] to true for each type t. Note, if +// you implement your own LayerType and give it a high value, this WILL create +// a very large slice. +func NewLayerClassSlice(types []LayerType) LayerClassSlice { + var max LayerType + for _, typ := range types { + if typ > max { + max = typ + } + } + t := make([]bool, int(max+1)) + for _, typ := range types { + t[typ] = true + } + return t +} + +// LayerClassMap implements a LayerClass with a map. +type LayerClassMap map[LayerType]bool + +// Contains returns true if the given layer type should be considered part +// of this layer class. +func (m LayerClassMap) Contains(t LayerType) bool { + return m[t] +} + +// LayerTypes returns all layer types in this LayerClassMap. 
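LayerClassSlice buys O(1) membership tests at the cost of a slice as large as the biggest LayerType it holds; the map-based variant completed just below stays compact for sparse or very large values, and NewLayerClass (further down) chooses between them automatically. A small sketch using only the layer types built into the core gopacket package:

```go
package main

import (
	"fmt"

	"github.com/google/gopacket"
)

func main() {
	// Slice-backed class: indexed lookups, sized to the largest LayerType in it.
	class := gopacket.NewLayerClassSlice([]gopacket.LayerType{
		gopacket.LayerTypePayload,
		gopacket.LayerTypeFragment,
	})
	fmt.Println(class.Contains(gopacket.LayerTypePayload))       // true
	fmt.Println(class.Contains(gopacket.LayerTypeDecodeFailure)) // false

	// A single LayerType already satisfies LayerClass, containing just itself.
	var single gopacket.LayerClass = gopacket.LayerTypeFragment
	fmt.Println(single.Contains(gopacket.LayerTypeFragment), single.LayerTypes())
}
```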
+func (m LayerClassMap) LayerTypes() (all []LayerType) { + for t := range m { + all = append(all, t) + } + return +} + +// NewLayerClassMap creates a LayerClassMap and sets map[t] to true for each +// type in types. +func NewLayerClassMap(types []LayerType) LayerClassMap { + m := LayerClassMap{} + for _, typ := range types { + m[typ] = true + } + return m +} + +// NewLayerClass creates a LayerClass, attempting to be smart about which type +// it creates based on which types are passed in. +func NewLayerClass(types []LayerType) LayerClass { + for _, typ := range types { + if typ > maxLayerType { + // NewLayerClassSlice could create a very large object, so instead create + // a map. + return NewLayerClassMap(types) + } + } + return NewLayerClassSlice(types) +} diff --git a/vendor/github.com/google/gopacket/layers/.lint_blacklist b/vendor/github.com/google/gopacket/layers/.lint_blacklist new file mode 100644 index 0000000000..fded4f6650 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/.lint_blacklist @@ -0,0 +1,39 @@ +dot11.go +eap.go +endpoints.go +enums_generated.go +enums.go +ethernet.go +geneve.go +icmp4.go +icmp6.go +igmp.go +ip4.go +ip6.go +layertypes.go +linux_sll.go +llc.go +lldp.go +mpls.go +ndp.go +ntp.go +ospf.go +pflog.go +pppoe.go +prism.go +radiotap.go +rudp.go +sctp.go +sflow.go +tcp.go +tcpip.go +tls.go +tls_alert.go +tls_appdata.go +tls_cipherspec.go +tls_hanshake.go +tls_test.go +udp.go +udplite.go +usb.go +vrrp.go diff --git a/vendor/github.com/google/gopacket/layers/arp.go b/vendor/github.com/google/gopacket/layers/arp.go new file mode 100644 index 0000000000..0775ac0b6d --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/arp.go @@ -0,0 +1,118 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// Copyright 2009-2011 Andreas Krennmair. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "errors" + "fmt" + + "github.com/google/gopacket" +) + +// Potential values for ARP.Operation. +const ( + ARPRequest = 1 + ARPReply = 2 +) + +// ARP is a ARP packet header. +type ARP struct { + BaseLayer + AddrType LinkType + Protocol EthernetType + HwAddressSize uint8 + ProtAddressSize uint8 + Operation uint16 + SourceHwAddress []byte + SourceProtAddress []byte + DstHwAddress []byte + DstProtAddress []byte +} + +// LayerType returns LayerTypeARP +func (arp *ARP) LayerType() gopacket.LayerType { return LayerTypeARP } + +// DecodeFromBytes decodes the given bytes into this layer. 
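With the ARP struct above and the DecodeFromBytes/SerializeTo methods that follow just below, building an ARP request and parsing it back takes only a few lines. A sketch with placeholder addresses (FixLengths derives the two size fields from the slice lengths):

```go
package main

import (
	"fmt"
	"net"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	srcMAC, _ := net.ParseMAC("aa:bb:cc:dd:ee:01") // placeholder addresses
	srcIP := net.ParseIP("192.0.2.1").To4()
	dstIP := net.ParseIP("192.0.2.2").To4()

	req := &layers.ARP{
		AddrType:          layers.LinkTypeEthernet,
		Protocol:          layers.EthernetTypeIPv4,
		Operation:         layers.ARPRequest,
		SourceHwAddress:   srcMAC,
		SourceProtAddress: srcIP,
		DstHwAddress:      make([]byte, 6), // unknown in a request
		DstProtAddress:    dstIP,
	}

	// FixLengths fills in HwAddressSize/ProtAddressSize from the slices above.
	buf := gopacket.NewSerializeBuffer()
	if err := gopacket.SerializeLayers(buf, gopacket.SerializeOptions{FixLengths: true}, req); err != nil {
		panic(err)
	}

	// Decode the wire bytes straight back, starting at the ARP layer.
	pkt := gopacket.NewPacket(buf.Bytes(), layers.LayerTypeARP, gopacket.Default)
	if got, ok := pkt.Layer(layers.LayerTypeARP).(*layers.ARP); ok {
		fmt.Printf("op=%d who-has %v tell %v\n",
			got.Operation, net.IP(got.DstProtAddress), net.IP(got.SourceProtAddress))
	}
}
```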
+func (arp *ARP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 8 { + df.SetTruncated() + return fmt.Errorf("ARP length %d too short", len(data)) + } + arp.AddrType = LinkType(binary.BigEndian.Uint16(data[0:2])) + arp.Protocol = EthernetType(binary.BigEndian.Uint16(data[2:4])) + arp.HwAddressSize = data[4] + arp.ProtAddressSize = data[5] + arp.Operation = binary.BigEndian.Uint16(data[6:8]) + arpLength := 8 + 2*arp.HwAddressSize + 2*arp.ProtAddressSize + if len(data) < int(arpLength) { + df.SetTruncated() + return fmt.Errorf("ARP length %d too short, %d expected", len(data), arpLength) + } + arp.SourceHwAddress = data[8 : 8+arp.HwAddressSize] + arp.SourceProtAddress = data[8+arp.HwAddressSize : 8+arp.HwAddressSize+arp.ProtAddressSize] + arp.DstHwAddress = data[8+arp.HwAddressSize+arp.ProtAddressSize : 8+2*arp.HwAddressSize+arp.ProtAddressSize] + arp.DstProtAddress = data[8+2*arp.HwAddressSize+arp.ProtAddressSize : 8+2*arp.HwAddressSize+2*arp.ProtAddressSize] + + arp.Contents = data[:arpLength] + arp.Payload = data[arpLength:] + return nil +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (arp *ARP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + size := 8 + len(arp.SourceHwAddress) + len(arp.SourceProtAddress) + len(arp.DstHwAddress) + len(arp.DstProtAddress) + bytes, err := b.PrependBytes(size) + if err != nil { + return err + } + if opts.FixLengths { + if len(arp.SourceHwAddress) != len(arp.DstHwAddress) { + return errors.New("mismatched hardware address sizes") + } + arp.HwAddressSize = uint8(len(arp.SourceHwAddress)) + if len(arp.SourceProtAddress) != len(arp.DstProtAddress) { + return errors.New("mismatched prot address sizes") + } + arp.ProtAddressSize = uint8(len(arp.SourceProtAddress)) + } + binary.BigEndian.PutUint16(bytes, uint16(arp.AddrType)) + binary.BigEndian.PutUint16(bytes[2:], uint16(arp.Protocol)) + bytes[4] = arp.HwAddressSize + bytes[5] = arp.ProtAddressSize + binary.BigEndian.PutUint16(bytes[6:], arp.Operation) + start := 8 + for _, addr := range [][]byte{ + arp.SourceHwAddress, + arp.SourceProtAddress, + arp.DstHwAddress, + arp.DstProtAddress, + } { + copy(bytes[start:], addr) + start += len(addr) + } + return nil +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (arp *ARP) CanDecode() gopacket.LayerClass { + return LayerTypeARP +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (arp *ARP) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypePayload +} + +func decodeARP(data []byte, p gopacket.PacketBuilder) error { + + arp := &ARP{} + return decodingLayerDecoder(arp, data, p) +} diff --git a/vendor/github.com/google/gopacket/layers/asf.go b/vendor/github.com/google/gopacket/layers/asf.go new file mode 100644 index 0000000000..d698bd0e53 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/asf.go @@ -0,0 +1,166 @@ +// Copyright 2019 The GoPacket Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file in the root of the source tree. 
+ +package layers + +// This file implements the ASF RMCP payload specified in section 3.2.2.3 of +// https://www.dmtf.org/sites/default/files/standards/documents/DSP0136.pdf + +import ( + "encoding/binary" + "fmt" + + "github.com/google/gopacket" +) + +const ( + // ASFRMCPEnterprise is the IANA-assigned Enterprise Number of the ASF-RMCP. + ASFRMCPEnterprise uint32 = 4542 +) + +// ASFDataIdentifier encapsulates fields used to uniquely identify the format of +// the data block. +// +// While the enterprise number is almost always 4542 (ASF-RMCP), we support +// registering layers using structs of this type as a key in case any users are +// using OEM-extensions. +type ASFDataIdentifier struct { + + // Enterprise is the IANA Enterprise Number associated with the entity that + // defines the message type. A list can be found at + // https://www.iana.org/assignments/enterprise-numbers/enterprise-numbers. + // This can be thought of as the namespace for the message type. + Enterprise uint32 + + // Type is the message type, defined by the entity associated with the + // enterprise above. No pressure, but in the context of EN 4542, 1 byte is + // the difference between sending a ping and telling a machine to do an + // unconditional power down (0x80 and 0x12 respectively). + Type uint8 +} + +// LayerType returns the payload layer type corresponding to an ASF message +// type. +func (a ASFDataIdentifier) LayerType() gopacket.LayerType { + if lt := asfDataLayerTypes[a]; lt != 0 { + return lt + } + + // some layer types don't have a payload, e.g. ASF-RMCP Presence Ping. + return gopacket.LayerTypePayload +} + +// RegisterASFLayerType allows specifying that the data block of ASF packets +// with a given enterprise number and type should be processed by a given layer +// type. This overrides any existing registrations, including defaults. +func RegisterASFLayerType(a ASFDataIdentifier, l gopacket.LayerType) { + asfDataLayerTypes[a] = l +} + +var ( + // ASFDataIdentifierPresencePong is the message type of the response to a + // Presence Ping message. It indicates the sender is ASF-RMCP-aware. + ASFDataIdentifierPresencePong = ASFDataIdentifier{ + Enterprise: ASFRMCPEnterprise, + Type: 0x40, + } + + // ASFDataIdentifierPresencePing is a message type sent to a managed client + // to solicit a Presence Pong response. Clients may ignore this if the RMCP + // version is unsupported. Sending this message with a sequence number <255 + // is the recommended way of finding out whether an implementation sends + // RMCP ACKs (e.g. iDRAC does, Super Micro does not). + // + // Systems implementing IPMI must respond to this ping to conform to the + // spec, so it is a good substitute for an ICMP ping. + ASFDataIdentifierPresencePing = ASFDataIdentifier{ + Enterprise: ASFRMCPEnterprise, + Type: 0x80, + } + + // asfDataLayerTypes is used to find the next layer for a given ASF header. + asfDataLayerTypes = map[ASFDataIdentifier]gopacket.LayerType{ + ASFDataIdentifierPresencePong: LayerTypeASFPresencePong, + } +) + +// ASF defines ASF's generic RMCP message Data block format. See section +// 3.2.2.3. +type ASF struct { + BaseLayer + ASFDataIdentifier + + // Tag is used to match request/response pairs. The tag of a response is set + // to that of the message it is responding to. If a message is + // unidirectional, i.e. not part of a request/response pair, this is set to + // 255. + Tag uint8 + + // 1 byte reserved, set to 0x00. + + // Length is the length of this layer's payload in bytes. 
+ Length uint8 +} + +// LayerType returns LayerTypeASF. It partially satisfies Layer and +// SerializableLayer. +func (*ASF) LayerType() gopacket.LayerType { + return LayerTypeASF +} + +// CanDecode returns LayerTypeASF. It partially satisfies DecodingLayer. +func (a *ASF) CanDecode() gopacket.LayerClass { + return a.LayerType() +} + +// DecodeFromBytes makes the layer represent the provided bytes. It partially +// satisfies DecodingLayer. +func (a *ASF) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 8 { + df.SetTruncated() + return fmt.Errorf("invalid ASF data header, length %v less than 8", + len(data)) + } + + a.BaseLayer.Contents = data[:8] + a.BaseLayer.Payload = data[8:] + + a.Enterprise = binary.BigEndian.Uint32(data[:4]) + a.Type = uint8(data[4]) + a.Tag = uint8(data[5]) + // 1 byte reserved + a.Length = uint8(data[7]) + return nil +} + +// NextLayerType returns the layer type corresponding to the message type of +// this ASF data layer. This partially satisfies DecodingLayer. +func (a *ASF) NextLayerType() gopacket.LayerType { + return a.ASFDataIdentifier.LayerType() +} + +// SerializeTo writes the serialized fom of this layer into the SerializeBuffer, +// partially satisfying SerializableLayer. +func (a *ASF) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + payload := b.Bytes() + bytes, err := b.PrependBytes(8) + if err != nil { + return err + } + binary.BigEndian.PutUint32(bytes[:4], a.Enterprise) + bytes[4] = uint8(a.Type) + bytes[5] = a.Tag + bytes[6] = 0x00 + if opts.FixLengths { + a.Length = uint8(len(payload)) + } + bytes[7] = a.Length + return nil +} + +// decodeASF decodes the byte slice into an RMCP-ASF data struct. +func decodeASF(data []byte, p gopacket.PacketBuilder) error { + return decodingLayerDecoder(&ASF{}, data, p) +} diff --git a/vendor/github.com/google/gopacket/layers/asf_presencepong.go b/vendor/github.com/google/gopacket/layers/asf_presencepong.go new file mode 100644 index 0000000000..e9a8baf16c --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/asf_presencepong.go @@ -0,0 +1,194 @@ +// Copyright 2019 The GoPacket Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file in the root of the source tree. + +package layers + +// This file implements the RMCP ASF Presence Pong message, specified in section +// 3.2.4.3 of +// https://www.dmtf.org/sites/default/files/standards/documents/DSP0136.pdf. It +// also contains non-competing elements from IPMI v2.0, specified in section +// 13.2.4 of +// https://www.intel.com/content/dam/www/public/us/en/documents/specification-updates/ipmi-intelligent-platform-mgt-interface-spec-2nd-gen-v2-0-spec-update.pdf. + +import ( + "encoding/binary" + "fmt" + + "github.com/google/gopacket" +) + +type ( + // ASFEntity is the type of individual entities that a Presence Pong + // response can indicate support of. The entities currently implemented by + // the spec are IPMI and ASFv1. + ASFEntity uint8 + + // ASFInteraction is the type of individual interactions that a Presence + // Pong response can indicate support for. The interactions currently + // implemented by the spec are RMCP security extensions. Although not + // specified, IPMI uses this field to indicate support for DASH, which is + // supported as well. + ASFInteraction uint8 +) + +const ( + // ASFDCMIEnterprise is the IANA-assigned Enterprise Number of the Data + // Center Manageability Interface Forum. 
The Presence Pong response's + // Enterprise field being set to this value indicates support for DCMI. The + // DCMI spec regards the OEM field as reserved, so these should be null. + ASFDCMIEnterprise uint32 = 36465 + + // ASFPresencePongEntityIPMI ANDs with Presence Pong's supported entities + // field if the managed system supports IPMI. + ASFPresencePongEntityIPMI ASFEntity = 1 << 7 + + // ASFPresencePongEntityASFv1 ANDs with Presence Pong's supported entities + // field if the managed system supports ASF v1.0. + ASFPresencePongEntityASFv1 ASFEntity = 1 + + // ASFPresencePongInteractionSecurityExtensions ANDs with Presence Pong's + // supported interactions field if the managed system supports RMCP v2.0 + // security extensions. See section 3.2.3. + ASFPresencePongInteractionSecurityExtensions ASFInteraction = 1 << 7 + + // ASFPresencePongInteractionDASH ANDs with Presence Pong's supported + // interactions field if the managed system supports DMTF DASH. See + // https://www.dmtf.org/standards/dash. + ASFPresencePongInteractionDASH ASFInteraction = 1 << 5 +) + +// ASFPresencePong defines the structure of a Presence Pong message's payload. +// See section 3.2.4.3. +type ASFPresencePong struct { + BaseLayer + + // Enterprise is the IANA Enterprise Number of an entity that has defined + // OEM-specific capabilities for the managed client. If no such capabilities + // exist, this is set to ASF's IANA Enterprise Number. + Enterprise uint32 + + // OEM identifies OEM-specific capabilities. Its structure is defined by the + // OEM. This is set to 0s if no OEM-specific capabilities exist. This + // implementation does not change byte order from the wire for this field. + OEM [4]byte + + // We break out entities and interactions into separate booleans as + // discovery is the entire point of this type of message, so we assume they + // are accessed. It also makes gopacket's default layer printing more + // useful. + + // IPMI is true if IPMI is supported by the managed system. There is no + // explicit version in the specification, however given the dates, this is + // assumed to be IPMI v1.0. Support for IPMI is contained in the "supported + // entities" field of the presence pong payload. + IPMI bool + + // ASFv1 indicates support for ASF v1.0. This seems somewhat redundant as + // ASF must be supported in order to receive a response. This is contained + // in the "supported entities" field of the presence pong payload. + ASFv1 bool + + // SecurityExtensions indicates support for RMCP Security Extensions, + // specified in ASF v2.0. This will always be false for v1.x + // implementations. This is contained in the "supported interactions" field + // of the presence pong payload. This field is defined in ASF v1.0, but has + // no useful value. + SecurityExtensions bool + + // DASH is true if DMTF DASH is supported. This is not specified in ASF + // v2.0, but in IPMI v2.0, however the former does not preclude it, so we + // support it. + DASH bool + + // 6 bytes reserved after the entities and interactions fields, set to 0s. +} + +// SupportsDCMI returns whether the Presence Pong message indicates support for +// the Data Center Management Interface, which is an extension of IPMI v2.0. +func (a *ASFPresencePong) SupportsDCMI() bool { + return a.Enterprise == ASFDCMIEnterprise && a.IPMI && a.ASFv1 +} + +// LayerType returns LayerTypeASFPresencePong. It partially satisfies Layer and +// SerializableLayer. 
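As an illustration of how the generic ASF header and the presence-ping/pong identifiers fit together, the sketch below serializes a bare Presence Ping data block (the tag value is arbitrary; in a real probe this block would be wrapped in an RMCP header and sent to UDP port 623):

```go
package main

import (
	"fmt"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	// A Presence Ping carries no data block, so FixLengths leaves Length at 0.
	ping := &layers.ASF{
		ASFDataIdentifier: layers.ASFDataIdentifierPresencePing,
		Tag:               0x01, // echoed back in the pong to match request/response
	}

	buf := gopacket.NewSerializeBuffer()
	if err := gopacket.SerializeLayers(buf, gopacket.SerializeOptions{FixLengths: true}, ping); err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", buf.Bytes()) // 00 00 11 be 80 01 00 00 (enterprise 4542, type 0x80)
}
```

The pong that comes back is routed to ASFPresencePong automatically, because ASF's NextLayerType looks the response's identifier up in the registered map; the capability booleans and SupportsDCMI above can then be inspected on the decoded layer.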
+func (*ASFPresencePong) LayerType() gopacket.LayerType { + return LayerTypeASFPresencePong +} + +// CanDecode returns LayerTypeASFPresencePong. It partially satisfies +// DecodingLayer. +func (a *ASFPresencePong) CanDecode() gopacket.LayerClass { + return a.LayerType() +} + +// DecodeFromBytes makes the layer represent the provided bytes. It partially +// satisfies DecodingLayer. +func (a *ASFPresencePong) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 16 { + df.SetTruncated() + return fmt.Errorf("invalid ASF presence pong payload, length %v less than 16", + len(data)) + } + + a.BaseLayer.Contents = data[:16] + a.BaseLayer.Payload = data[16:] + + a.Enterprise = binary.BigEndian.Uint32(data[:4]) + copy(a.OEM[:], data[4:8]) // N.B. no byte order change + a.IPMI = data[8]&uint8(ASFPresencePongEntityIPMI) != 0 + a.ASFv1 = data[8]&uint8(ASFPresencePongEntityASFv1) != 0 + a.SecurityExtensions = data[9]&uint8(ASFPresencePongInteractionSecurityExtensions) != 0 + a.DASH = data[9]&uint8(ASFPresencePongInteractionDASH) != 0 + // ignore remaining 6 bytes; should be set to 0s + return nil +} + +// NextLayerType returns LayerTypePayload, as there are no further layers to +// decode. This partially satisfies DecodingLayer. +func (a *ASFPresencePong) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypePayload +} + +// SerializeTo writes the serialized fom of this layer into the SerializeBuffer, +// partially satisfying SerializableLayer. +func (a *ASFPresencePong) SerializeTo(b gopacket.SerializeBuffer, _ gopacket.SerializeOptions) error { + bytes, err := b.PrependBytes(16) + if err != nil { + return err + } + + binary.BigEndian.PutUint32(bytes[:4], a.Enterprise) + + copy(bytes[4:8], a.OEM[:]) + + bytes[8] = 0 + if a.IPMI { + bytes[8] |= uint8(ASFPresencePongEntityIPMI) + } + if a.ASFv1 { + bytes[8] |= uint8(ASFPresencePongEntityASFv1) + } + + bytes[9] = 0 + if a.SecurityExtensions { + bytes[9] |= uint8(ASFPresencePongInteractionSecurityExtensions) + } + if a.DASH { + bytes[9] |= uint8(ASFPresencePongInteractionDASH) + } + + // zero-out remaining 6 bytes + for i := 10; i < len(bytes); i++ { + bytes[i] = 0x00 + } + + return nil +} + +// decodeASFPresencePong decodes the byte slice into an RMCP-ASF Presence Pong +// struct. +func decodeASFPresencePong(data []byte, p gopacket.PacketBuilder) error { + return decodingLayerDecoder(&ASFPresencePong{}, data, p) +} diff --git a/vendor/github.com/google/gopacket/layers/base.go b/vendor/github.com/google/gopacket/layers/base.go new file mode 100644 index 0000000000..cd59b46786 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/base.go @@ -0,0 +1,52 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "github.com/google/gopacket" +) + +// BaseLayer is a convenience struct which implements the LayerData and +// LayerPayload functions of the Layer interface. +type BaseLayer struct { + // Contents is the set of bytes that make up this layer. IE: for an + // Ethernet packet, this would be the set of bytes making up the + // Ethernet frame. + Contents []byte + // Payload is the set of bytes contained by (but not part of) this + // Layer. Again, to take Ethernet as an example, this would be the + // set of bytes encapsulated by the Ethernet protocol. + Payload []byte +} + +// LayerContents returns the bytes of the packet layer. 
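BaseLayer, defined just above, is what most concrete layers embed to get LayerContents and LayerPayload for free; a layer only has to split its bytes into Contents and Payload. A sketch of that pattern from outside the package, using an invented "Foo" protocol (the 2-byte header, the name, and the type number 12345 are all hypothetical):

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

// FooLayer is a made-up layer: a 2-byte big-endian ID followed by its payload.
type FooLayer struct {
	layers.BaseLayer // provides LayerContents()/LayerPayload()
	ID               uint16
}

// A hypothetical LayerType number, chosen well away from gopacket's own range.
var LayerTypeFoo = gopacket.RegisterLayerType(12345, gopacket.LayerTypeMetadata{
	Name:    "Foo",
	Decoder: gopacket.DecodeFunc(decodeFoo),
})

func (f *FooLayer) LayerType() gopacket.LayerType { return LayerTypeFoo }

func (f *FooLayer) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
	if len(data) < 2 {
		df.SetTruncated()
		return errors.New("foo header too short")
	}
	f.ID = binary.BigEndian.Uint16(data[:2])
	// Contents = this layer's own bytes, Payload = everything it encapsulates.
	f.BaseLayer = layers.BaseLayer{Contents: data[:2], Payload: data[2:]}
	return nil
}

func decodeFoo(data []byte, p gopacket.PacketBuilder) error {
	f := &FooLayer{}
	if err := f.DecodeFromBytes(data, p); err != nil {
		return err
	}
	p.AddLayer(f)
	return p.NextDecoder(gopacket.LayerTypePayload)
}

func main() {
	pkt := gopacket.NewPacket([]byte{0x00, 0x2a, 'h', 'i'}, LayerTypeFoo, gopacket.Default)
	foo := pkt.Layer(LayerTypeFoo).(*FooLayer)
	fmt.Println(foo.ID, string(foo.LayerPayload())) // 42 hi
}
```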
+func (b *BaseLayer) LayerContents() []byte { return b.Contents } + +// LayerPayload returns the bytes contained within the packet layer. +func (b *BaseLayer) LayerPayload() []byte { return b.Payload } + +type layerDecodingLayer interface { + gopacket.Layer + DecodeFromBytes([]byte, gopacket.DecodeFeedback) error + NextLayerType() gopacket.LayerType +} + +func decodingLayerDecoder(d layerDecodingLayer, data []byte, p gopacket.PacketBuilder) error { + err := d.DecodeFromBytes(data, p) + if err != nil { + return err + } + p.AddLayer(d) + next := d.NextLayerType() + if next == gopacket.LayerTypeZero { + return nil + } + return p.NextDecoder(next) +} + +// hacky way to zero out memory... there must be a better way? +var lotsOfZeros [1024]byte diff --git a/vendor/github.com/google/gopacket/layers/bfd.go b/vendor/github.com/google/gopacket/layers/bfd.go new file mode 100644 index 0000000000..43030fb6a5 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/bfd.go @@ -0,0 +1,481 @@ +// Copyright 2017 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. +// + +package layers + +import ( + "encoding/binary" + "errors" + + "github.com/google/gopacket" +) + +// BFD Control Packet Format +// ------------------------- +// The current version of BFD's RFC (RFC 5880) contains the following +// diagram for the BFD Control packet format: +// +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// |Vers | Diag |Sta|P|F|C|A|D|M| Detect Mult | Length | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | My Discriminator | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Your Discriminator | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Desired Min TX Interval | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Required Min RX Interval | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Required Min Echo RX Interval | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// +// An optional Authentication Section MAY be present: +// +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Auth Type | Auth Len | Authentication Data... | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// +// +// Simple Password Authentication Section Format +// --------------------------------------------- +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Auth Type | Auth Len | Auth Key ID | Password... | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | ... 
| +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// +// +// Keyed MD5 and Meticulous Keyed MD5 Authentication Section Format +// ---------------------------------------------------------------- +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Auth Type | Auth Len | Auth Key ID | Reserved | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Sequence Number | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Auth Key/Digest... | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | ... | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// +// +// Keyed SHA1 and Meticulous Keyed SHA1 Authentication Section Format +// ------------------------------------------------------------------ +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Auth Type | Auth Len | Auth Key ID | Reserved | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Sequence Number | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Auth Key/Hash... | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | ... | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// +// From https://tools.ietf.org/rfc/rfc5880.txt +const bfdMinimumRecordSizeInBytes int = 24 + +// BFDVersion represents the version as decoded from the BFD control message +type BFDVersion uint8 + +// BFDDiagnostic represents diagnostic infomation about a BFD session +type BFDDiagnostic uint8 + +// constants that define BFDDiagnostic flags +const ( + BFDDiagnosticNone BFDDiagnostic = 0 // No Diagnostic + BFDDiagnosticTimeExpired BFDDiagnostic = 1 // Control Detection Time Expired + BFDDiagnosticEchoFailed BFDDiagnostic = 2 // Echo Function Failed + BFDDiagnosticNeighborSignalDown BFDDiagnostic = 3 // Neighbor Signaled Session Down + BFDDiagnosticForwardPlaneReset BFDDiagnostic = 4 // Forwarding Plane Reset + BFDDiagnosticPathDown BFDDiagnostic = 5 // Path Down + BFDDiagnosticConcatPathDown BFDDiagnostic = 6 // Concatenated Path Down + BFDDiagnosticAdminDown BFDDiagnostic = 7 // Administratively Down + BFDDiagnosticRevConcatPathDown BFDDiagnostic = 8 // Reverse Concatenated Path Dow +) + +// String returns a string version of BFDDiagnostic +func (bd BFDDiagnostic) String() string { + switch bd { + default: + return "Unknown" + case BFDDiagnosticNone: + return "None" + case BFDDiagnosticTimeExpired: + return "Control Detection Time Expired" + case BFDDiagnosticEchoFailed: + return "Echo Function Failed" + case BFDDiagnosticNeighborSignalDown: + return "Neighbor Signaled Session Down" + case BFDDiagnosticForwardPlaneReset: + return "Forwarding Plane Reset" + case BFDDiagnosticPathDown: + return "Path Down" + case BFDDiagnosticConcatPathDown: + return "Concatenated Path Down" + case BFDDiagnosticAdminDown: + return "Administratively Down" + case BFDDiagnosticRevConcatPathDown: + return "Reverse Concatenated Path Down" + } +} + +// BFDState represents the state of a BFD session +type BFDState uint8 + +// constants that define BFDState +const ( + BFDStateAdminDown BFDState = 0 + BFDStateDown BFDState = 1 + BFDStateInit BFDState = 2 + BFDStateUp BFDState = 3 +) + +// String returns a string version of BFDState +func (s BFDState) String() string { + switch s { + 
default: + return "Unknown" + case BFDStateAdminDown: + return "Admin Down" + case BFDStateDown: + return "Down" + case BFDStateInit: + return "Init" + case BFDStateUp: + return "Up" + } +} + +// BFDDetectMultiplier represents the negotiated transmit interval, +// multiplied by this value, provides the Detection Time for the +// receiving system in Asynchronous mode. +type BFDDetectMultiplier uint8 + +// BFDDiscriminator is a unique, nonzero discriminator value used +// to demultiplex multiple BFD sessions between the same pair of systems. +type BFDDiscriminator uint32 + +// BFDTimeInterval represents a time interval in microseconds +type BFDTimeInterval uint32 + +// BFDAuthType represents the authentication used in the BFD session +type BFDAuthType uint8 + +// constants that define the BFDAuthType +const ( + BFDAuthTypeNone BFDAuthType = 0 // No Auth + BFDAuthTypePassword BFDAuthType = 1 // Simple Password + BFDAuthTypeKeyedMD5 BFDAuthType = 2 // Keyed MD5 + BFDAuthTypeMeticulousKeyedMD5 BFDAuthType = 3 // Meticulous Keyed MD5 + BFDAuthTypeKeyedSHA1 BFDAuthType = 4 // Keyed SHA1 + BFDAuthTypeMeticulousKeyedSHA1 BFDAuthType = 5 // Meticulous Keyed SHA1 +) + +// String returns a string version of BFDAuthType +func (at BFDAuthType) String() string { + switch at { + default: + return "Unknown" + case BFDAuthTypeNone: + return "No Authentication" + case BFDAuthTypePassword: + return "Simple Password" + case BFDAuthTypeKeyedMD5: + return "Keyed MD5" + case BFDAuthTypeMeticulousKeyedMD5: + return "Meticulous Keyed MD5" + case BFDAuthTypeKeyedSHA1: + return "Keyed SHA1" + case BFDAuthTypeMeticulousKeyedSHA1: + return "Meticulous Keyed SHA1" + } +} + +// BFDAuthKeyID represents the authentication key ID in use for +// this packet. This allows multiple keys to be active simultaneously. +type BFDAuthKeyID uint8 + +// BFDAuthSequenceNumber represents the sequence number for this packet. +// For Keyed Authentication, this value is incremented occasionally. For +// Meticulous Keyed Authentication, this value is incremented for each +// successive packet transmitted for a session. This provides protection +// against replay attacks. +type BFDAuthSequenceNumber uint32 + +// BFDAuthData represents the authentication key or digest +type BFDAuthData []byte + +// BFDAuthHeader represents authentication data used in the BFD session +type BFDAuthHeader struct { + AuthType BFDAuthType + KeyID BFDAuthKeyID + SequenceNumber BFDAuthSequenceNumber + Data BFDAuthData +} + +// Length returns the data length of the BFDAuthHeader based on the +// authentication type +func (h *BFDAuthHeader) Length() int { + switch h.AuthType { + case BFDAuthTypePassword: + return 3 + len(h.Data) + case BFDAuthTypeKeyedMD5, BFDAuthTypeMeticulousKeyedMD5: + return 8 + len(h.Data) + case BFDAuthTypeKeyedSHA1, BFDAuthTypeMeticulousKeyedSHA1: + return 8 + len(h.Data) + default: + return 0 + } +} + +// BFD represents a BFD control message packet whose payload contains +// the control information required to for a BFD session. +// +// References +// ---------- +// +// Wikipedia's BFD entry: +// https://en.wikipedia.org/wiki/Bidirectional_Forwarding_Detection +// This is the best place to get an overview of BFD. +// +// RFC 5880 "Bidirectional Forwarding Detection (BFD)" (2010) +// https://tools.ietf.org/html/rfc5880 +// This is the original BFD specification. 
+// +// RFC 5881 "Bidirectional Forwarding Detection (BFD) for IPv4 and IPv6 (Single Hop)" (2010) +// https://tools.ietf.org/html/rfc5881 +// Describes the use of the Bidirectional Forwarding Detection (BFD) +// protocol over IPv4 and IPv6 for single IP hops. +type BFD struct { + BaseLayer // Stores the packet bytes and payload bytes. + + Version BFDVersion // Version of the BFD protocol. + Diagnostic BFDDiagnostic // Diagnostic code for last state change + State BFDState // Current state + Poll bool // Requesting verification + Final bool // Responding to a received BFD Control packet that had the Poll (P) bit set. + ControlPlaneIndependent bool // BFD implementation does not share fate with its control plane + AuthPresent bool // Authentication Section is present and the session is to be authenticated + Demand bool // Demand mode is active + Multipoint bool // For future point-to-multipoint extensions. Must always be zero + DetectMultiplier BFDDetectMultiplier // Detection time multiplier + MyDiscriminator BFDDiscriminator // A unique, nonzero discriminator value + YourDiscriminator BFDDiscriminator // discriminator received from the remote system. + DesiredMinTxInterval BFDTimeInterval // Minimum interval, in microseconds, the local system would like to use when transmitting BFD Control packets + RequiredMinRxInterval BFDTimeInterval // Minimum interval, in microseconds, between received BFD Control packets that this system is capable of supporting + RequiredMinEchoRxInterval BFDTimeInterval // Minimum interval, in microseconds, between received BFD Echo packets that this system is capable of supporting + AuthHeader *BFDAuthHeader // Authentication data, variable length. +} + +// Length returns the data length of a BFD Control message which +// changes based on the presence and type of authentication +// contained in the message +func (d *BFD) Length() int { + if d.AuthPresent && (d.AuthHeader != nil) { + return bfdMinimumRecordSizeInBytes + d.AuthHeader.Length() + } + + return bfdMinimumRecordSizeInBytes +} + +// LayerType returns the layer type of the BFD object, which is LayerTypeBFD. +func (d *BFD) LayerType() gopacket.LayerType { + return LayerTypeBFD +} + +// decodeBFD analyses a byte slice and attempts to decode it as a BFD +// control packet +// +// If it succeeds, it loads p with information about the packet and returns nil. +// If it fails, it returns an error (non nil). +// +// This function is employed in layertypes.go to register the BFD layer. +func decodeBFD(data []byte, p gopacket.PacketBuilder) error { + + // Attempt to decode the byte slice. + d := &BFD{} + err := d.DecodeFromBytes(data, p) + if err != nil { + return err + } + + // If the decoding worked, add the layer to the packet and set it + // as the application layer too, if there isn't already one. + p.AddLayer(d) + p.SetApplicationLayer(d) + + return nil +} + +// DecodeFromBytes analyses a byte slice and attempts to decode it as a BFD +// control packet. +// +// Upon succeeds, it loads the BFD object with information about the packet +// and returns nil. +// Upon failure, it returns an error (non nil). +func (d *BFD) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + + // If the data block is too short to be a BFD record, then return an error. 
+ if len(data) < bfdMinimumRecordSizeInBytes { + df.SetTruncated() + return errors.New("BFD packet too short") + } + + pLen := uint8(data[3]) + if len(data) != int(pLen) { + return errors.New("BFD packet length does not match") + } + + // BFD type embeds type BaseLayer which contains two fields: + // Contents is supposed to contain the bytes of the data at this level. + // Payload is supposed to contain the payload of this level. + // Here we set the baselayer to be the bytes of the BFD record. + d.BaseLayer = BaseLayer{Contents: data[:len(data)]} + + // Extract the fields from the block of bytes. + // To make sense of this, refer to the packet diagram + // above and the section on endian conventions. + + // The first few fields are all packed into the first 32 bits. Unpack them. + d.Version = BFDVersion(((data[0] & 0xE0) >> 5)) + d.Diagnostic = BFDDiagnostic(data[0] & 0x1F) + data = data[1:] + + d.State = BFDState((data[0] & 0xC0) >> 6) + d.Poll = data[0]&0x20 != 0 + d.Final = data[0]&0x10 != 0 + d.ControlPlaneIndependent = data[0]&0x08 != 0 + d.AuthPresent = data[0]&0x04 != 0 + d.Demand = data[0]&0x02 != 0 + d.Multipoint = data[0]&0x01 != 0 + data = data[1:] + + data, d.DetectMultiplier = data[1:], BFDDetectMultiplier(data[0]) + data, _ = data[1:], uint8(data[0]) // Consume length + + // The remaining fields can just be copied in big endian order. + data, d.MyDiscriminator = data[4:], BFDDiscriminator(binary.BigEndian.Uint32(data[:4])) + data, d.YourDiscriminator = data[4:], BFDDiscriminator(binary.BigEndian.Uint32(data[:4])) + data, d.DesiredMinTxInterval = data[4:], BFDTimeInterval(binary.BigEndian.Uint32(data[:4])) + data, d.RequiredMinRxInterval = data[4:], BFDTimeInterval(binary.BigEndian.Uint32(data[:4])) + data, d.RequiredMinEchoRxInterval = data[4:], BFDTimeInterval(binary.BigEndian.Uint32(data[:4])) + + if d.AuthPresent && (len(data) > 2) { + d.AuthHeader = &BFDAuthHeader{} + data, d.AuthHeader.AuthType = data[1:], BFDAuthType(data[0]) + data, _ = data[1:], uint8(data[0]) // Consume length + data, d.AuthHeader.KeyID = data[1:], BFDAuthKeyID(data[0]) + + switch d.AuthHeader.AuthType { + case BFDAuthTypePassword: + d.AuthHeader.Data = BFDAuthData(data) + case BFDAuthTypeKeyedMD5, BFDAuthTypeMeticulousKeyedMD5: + // Skipped reserved byte + data, d.AuthHeader.SequenceNumber = data[5:], BFDAuthSequenceNumber(binary.BigEndian.Uint32(data[1:5])) + d.AuthHeader.Data = BFDAuthData(data) + case BFDAuthTypeKeyedSHA1, BFDAuthTypeMeticulousKeyedSHA1: + // Skipped reserved byte + data, d.AuthHeader.SequenceNumber = data[5:], BFDAuthSequenceNumber(binary.BigEndian.Uint32(data[1:5])) + d.AuthHeader.Data = BFDAuthData(data) + } + } + + return nil +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (d *BFD) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + data, err := b.PrependBytes(bfdMinimumRecordSizeInBytes) + if err != nil { + return err + } + + // Pack the first few fields into the first 32 bits. 
+ data[0] = byte(byte(d.Version<<5) | byte(d.Diagnostic)) + h := uint8(0) + h |= (uint8(d.State) << 6) + h |= (uint8(bool2uint8(d.Poll)) << 5) + h |= (uint8(bool2uint8(d.Final)) << 4) + h |= (uint8(bool2uint8(d.ControlPlaneIndependent)) << 3) + h |= (uint8(bool2uint8(d.AuthPresent)) << 2) + h |= (uint8(bool2uint8(d.Demand)) << 1) + h |= uint8(bool2uint8(d.Multipoint)) + data[1] = byte(h) + data[2] = byte(d.DetectMultiplier) + data[3] = byte(d.Length()) + + // The remaining fields can just be copied in big endian order. + binary.BigEndian.PutUint32(data[4:], uint32(d.MyDiscriminator)) + binary.BigEndian.PutUint32(data[8:], uint32(d.YourDiscriminator)) + binary.BigEndian.PutUint32(data[12:], uint32(d.DesiredMinTxInterval)) + binary.BigEndian.PutUint32(data[16:], uint32(d.RequiredMinRxInterval)) + binary.BigEndian.PutUint32(data[20:], uint32(d.RequiredMinEchoRxInterval)) + + if d.AuthPresent && (d.AuthHeader != nil) { + auth, err := b.AppendBytes(int(d.AuthHeader.Length())) + if err != nil { + return err + } + + auth[0] = byte(d.AuthHeader.AuthType) + auth[1] = byte(d.AuthHeader.Length()) + auth[2] = byte(d.AuthHeader.KeyID) + + switch d.AuthHeader.AuthType { + case BFDAuthTypePassword: + copy(auth[3:], d.AuthHeader.Data) + case BFDAuthTypeKeyedMD5, BFDAuthTypeMeticulousKeyedMD5: + auth[3] = byte(0) + binary.BigEndian.PutUint32(auth[4:], uint32(d.AuthHeader.SequenceNumber)) + copy(auth[8:], d.AuthHeader.Data) + case BFDAuthTypeKeyedSHA1, BFDAuthTypeMeticulousKeyedSHA1: + auth[3] = byte(0) + binary.BigEndian.PutUint32(auth[4:], uint32(d.AuthHeader.SequenceNumber)) + copy(auth[8:], d.AuthHeader.Data) + } + } + + return nil +} + +// CanDecode returns a set of layers that BFD objects can decode. +// As BFD objects can only decide the BFD layer, we can return just that layer. +// Apparently a single layer type implements LayerClass. +func (d *BFD) CanDecode() gopacket.LayerClass { + return LayerTypeBFD +} + +// NextLayerType specifies the next layer that GoPacket should attempt to +// analyse after this (BFD) layer. As BFD packets do not contain any payload +// bytes, there are no further layers to analyse. +func (d *BFD) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypeZero +} + +// Payload returns an empty byte slice as BFD packets do not carry a payload +func (d *BFD) Payload() []byte { + return nil +} + +// bool2uint8 converts a bool to uint8 +func bool2uint8(b bool) uint8 { + if b { + return 1 + } + return 0 +} diff --git a/vendor/github.com/google/gopacket/layers/cdp.go b/vendor/github.com/google/gopacket/layers/cdp.go new file mode 100644 index 0000000000..095f926125 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/cdp.go @@ -0,0 +1,659 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +// Enum types courtesy of... +// http://search.cpan.org/~mchapman/Net-CDP-0.09/lib/Net/CDP.pm +// https://code.google.com/p/ladvd/ +// http://anonsvn.wireshark.org/viewvc/releases/wireshark-1.8.6/epan/dissectors/packet-cdp.c + +package layers + +import ( + "encoding/binary" + "errors" + "fmt" + "net" + + "github.com/google/gopacket" +) + +// CDPTLVType is the type of each TLV value in a CiscoDiscovery packet. +type CDPTLVType uint16 + +// CDPTLVType values. 
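A quick, illustrative way to exercise the BFD layer completed above without a capture file is to hand-assemble the 24 mandatory bytes and let gopacket decode them (all field values below are arbitrary; on the wire, BFD control packets normally arrive over UDP port 3784 or 4784):

```go
package main

import (
	"encoding/binary"
	"fmt"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	// Hand-built 24-byte BFD control packet: version 1, state Up,
	// detect multiplier 3, no authentication section.
	raw := make([]byte, 24)
	raw[0] = 0x20 // version 1, diagnostic 0 (None)
	raw[1] = 0xC0 // state Up, no flags
	raw[2] = 3    // detect multiplier
	raw[3] = 24   // length must equal len(raw)
	binary.BigEndian.PutUint32(raw[4:], 0x11111111) // my discriminator
	binary.BigEndian.PutUint32(raw[8:], 0x22222222) // your discriminator
	binary.BigEndian.PutUint32(raw[12:], 1000000)   // desired min TX interval (µs)
	binary.BigEndian.PutUint32(raw[16:], 1000000)   // required min RX interval (µs)
	binary.BigEndian.PutUint32(raw[20:], 0)         // required min echo RX interval (µs)

	pkt := gopacket.NewPacket(raw, layers.LayerTypeBFD, gopacket.Default)
	if bfd, ok := pkt.Layer(layers.LayerTypeBFD).(*layers.BFD); ok {
		fmt.Println(bfd.State, bfd.DetectMultiplier, bfd.MyDiscriminator) // Up 3 286331153
	}
}
```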
+const ( + CDPTLVDevID CDPTLVType = 0x0001 + CDPTLVAddress CDPTLVType = 0x0002 + CDPTLVPortID CDPTLVType = 0x0003 + CDPTLVCapabilities CDPTLVType = 0x0004 + CDPTLVVersion CDPTLVType = 0x0005 + CDPTLVPlatform CDPTLVType = 0x0006 + CDPTLVIPPrefix CDPTLVType = 0x0007 + CDPTLVHello CDPTLVType = 0x0008 + CDPTLVVTPDomain CDPTLVType = 0x0009 + CDPTLVNativeVLAN CDPTLVType = 0x000a + CDPTLVFullDuplex CDPTLVType = 0x000b + CDPTLVVLANReply CDPTLVType = 0x000e + CDPTLVVLANQuery CDPTLVType = 0x000f + CDPTLVPower CDPTLVType = 0x0010 + CDPTLVMTU CDPTLVType = 0x0011 + CDPTLVExtendedTrust CDPTLVType = 0x0012 + CDPTLVUntrustedCOS CDPTLVType = 0x0013 + CDPTLVSysName CDPTLVType = 0x0014 + CDPTLVSysOID CDPTLVType = 0x0015 + CDPTLVMgmtAddresses CDPTLVType = 0x0016 + CDPTLVLocation CDPTLVType = 0x0017 + CDPTLVExternalPortID CDPTLVType = 0x0018 + CDPTLVPowerRequested CDPTLVType = 0x0019 + CDPTLVPowerAvailable CDPTLVType = 0x001a + CDPTLVPortUnidirectional CDPTLVType = 0x001b + CDPTLVEnergyWise CDPTLVType = 0x001d + CDPTLVSparePairPOE CDPTLVType = 0x001f +) + +// CiscoDiscoveryValue is a TLV value inside a CiscoDiscovery packet layer. +type CiscoDiscoveryValue struct { + Type CDPTLVType + Length uint16 + Value []byte +} + +// CiscoDiscovery is a packet layer containing the Cisco Discovery Protocol. +// See http://www.cisco.com/univercd/cc/td/doc/product/lan/trsrb/frames.htm#31885 +type CiscoDiscovery struct { + BaseLayer + Version byte + TTL byte + Checksum uint16 + Values []CiscoDiscoveryValue +} + +// CDPCapability is the set of capabilities advertised by a CDP device. +type CDPCapability uint32 + +// CDPCapability values. +const ( + CDPCapMaskRouter CDPCapability = 0x0001 + CDPCapMaskTBBridge CDPCapability = 0x0002 + CDPCapMaskSPBridge CDPCapability = 0x0004 + CDPCapMaskSwitch CDPCapability = 0x0008 + CDPCapMaskHost CDPCapability = 0x0010 + CDPCapMaskIGMPFilter CDPCapability = 0x0020 + CDPCapMaskRepeater CDPCapability = 0x0040 + CDPCapMaskPhone CDPCapability = 0x0080 + CDPCapMaskRemote CDPCapability = 0x0100 +) + +// CDPCapabilities represents the capabilities of a device +type CDPCapabilities struct { + L3Router bool + TBBridge bool + SPBridge bool + L2Switch bool + IsHost bool + IGMPFilter bool + L1Repeater bool + IsPhone bool + RemotelyManaged bool +} + +// CDP Power-over-Ethernet values. +const ( + CDPPoEFourWire byte = 0x01 + CDPPoEPDArch byte = 0x02 + CDPPoEPDRequest byte = 0x04 + CDPPoEPSE byte = 0x08 +) + +// CDPSparePairPoE provides information on PoE. +type CDPSparePairPoE struct { + PSEFourWire bool // Supported / Not supported + PDArchShared bool // Shared / Independent + PDRequestOn bool // On / Off + PSEOn bool // On / Off +} + +// CDPVLANDialogue encapsulates a VLAN Query/Reply +type CDPVLANDialogue struct { + ID uint8 + VLAN uint16 +} + +// CDPPowerDialogue encapsulates a Power Query/Reply +type CDPPowerDialogue struct { + ID uint16 + MgmtID uint16 + Values []uint32 +} + +// CDPLocation provides location information for a CDP device. +type CDPLocation struct { + Type uint8 // Undocumented + Location string +} + +// CDPHello is a Cisco Hello message (undocumented, hence the "Unknown" fields) +type CDPHello struct { + OUI []byte + ProtocolID uint16 + ClusterMaster net.IP + Unknown1 net.IP + Version byte + SubVersion byte + Status byte + Unknown2 byte + ClusterCommander net.HardwareAddr + SwitchMAC net.HardwareAddr + Unknown3 byte + ManagementVLAN uint16 +} + +// CDPEnergyWiseSubtype is used within CDP to define TLV values. +type CDPEnergyWiseSubtype uint32 + +// CDPEnergyWiseSubtype values. 
+const ( + CDPEnergyWiseRole CDPEnergyWiseSubtype = 0x00000007 + CDPEnergyWiseDomain CDPEnergyWiseSubtype = 0x00000008 + CDPEnergyWiseName CDPEnergyWiseSubtype = 0x00000009 + CDPEnergyWiseReplyTo CDPEnergyWiseSubtype = 0x00000017 +) + +// CDPEnergyWise is used by CDP to monitor and control power usage. +type CDPEnergyWise struct { + EncryptedData []byte + Unknown1 uint32 + SequenceNumber uint32 + ModelNumber string + Unknown2 uint16 + HardwareID string + SerialNum string + Unknown3 []byte + Role string + Domain string + Name string + ReplyUnknown1 []byte + ReplyPort []byte + ReplyAddress []byte + ReplyUnknown2 []byte + ReplyUnknown3 []byte +} + +// CiscoDiscoveryInfo represents the decoded details for a set of CiscoDiscoveryValues +type CiscoDiscoveryInfo struct { + BaseLayer + CDPHello + DeviceID string + Addresses []net.IP + PortID string + Capabilities CDPCapabilities + Version string + Platform string + IPPrefixes []net.IPNet + VTPDomain string + NativeVLAN uint16 + FullDuplex bool + VLANReply CDPVLANDialogue + VLANQuery CDPVLANDialogue + PowerConsumption uint16 + MTU uint32 + ExtendedTrust uint8 + UntrustedCOS uint8 + SysName string + SysOID string + MgmtAddresses []net.IP + Location CDPLocation + PowerRequest CDPPowerDialogue + PowerAvailable CDPPowerDialogue + SparePairPoe CDPSparePairPoE + EnergyWise CDPEnergyWise + Unknown []CiscoDiscoveryValue +} + +// LayerType returns gopacket.LayerTypeCiscoDiscovery. +func (c *CiscoDiscovery) LayerType() gopacket.LayerType { + return LayerTypeCiscoDiscovery +} + +func decodeCiscoDiscovery(data []byte, p gopacket.PacketBuilder) error { + c := &CiscoDiscovery{ + Version: data[0], + TTL: data[1], + Checksum: binary.BigEndian.Uint16(data[2:4]), + } + if c.Version != 1 && c.Version != 2 { + return fmt.Errorf("Invalid CiscoDiscovery version number %d", c.Version) + } + var err error + c.Values, err = decodeCiscoDiscoveryTLVs(data[4:], p) + if err != nil { + return err + } + c.Contents = data[0:4] + c.Payload = data[4:] + p.AddLayer(c) + return p.NextDecoder(gopacket.DecodeFunc(decodeCiscoDiscoveryInfo)) +} + +// LayerType returns gopacket.LayerTypeCiscoDiscoveryInfo. +func (c *CiscoDiscoveryInfo) LayerType() gopacket.LayerType { + return LayerTypeCiscoDiscoveryInfo +} + +func decodeCiscoDiscoveryTLVs(data []byte, p gopacket.PacketBuilder) (values []CiscoDiscoveryValue, err error) { + for len(data) > 0 { + if len(data) < 4 { + p.SetTruncated() + return nil, errors.New("CDP TLV < 4 bytes") + } + val := CiscoDiscoveryValue{ + Type: CDPTLVType(binary.BigEndian.Uint16(data[:2])), + Length: binary.BigEndian.Uint16(data[2:4]), + } + if val.Length < 4 { + err = fmt.Errorf("Invalid CiscoDiscovery value length %d", val.Length) + break + } else if len(data) < int(val.Length) { + p.SetTruncated() + return nil, fmt.Errorf("CDP TLV < length %d", val.Length) + } + val.Value = data[4:val.Length] + values = append(values, val) + data = data[val.Length:] + } + return +} + +func decodeCiscoDiscoveryInfo(data []byte, p gopacket.PacketBuilder) error { + var err error + info := &CiscoDiscoveryInfo{BaseLayer: BaseLayer{Contents: data}} + p.AddLayer(info) + values, err := decodeCiscoDiscoveryTLVs(data, p) + if err != nil { // Unlikely, as parent decode will fail, but better safe... 
+ return err + } + for _, val := range values { + switch val.Type { + case CDPTLVDevID: + info.DeviceID = string(val.Value) + case CDPTLVAddress: + if err = checkCDPTLVLen(val, 4); err != nil { + return err + } + info.Addresses, err = decodeAddresses(val.Value) + if err != nil { + return err + } + case CDPTLVPortID: + info.PortID = string(val.Value) + case CDPTLVCapabilities: + if err = checkCDPTLVLen(val, 4); err != nil { + return err + } + val := CDPCapability(binary.BigEndian.Uint32(val.Value[0:4])) + info.Capabilities.L3Router = (val&CDPCapMaskRouter > 0) + info.Capabilities.TBBridge = (val&CDPCapMaskTBBridge > 0) + info.Capabilities.SPBridge = (val&CDPCapMaskSPBridge > 0) + info.Capabilities.L2Switch = (val&CDPCapMaskSwitch > 0) + info.Capabilities.IsHost = (val&CDPCapMaskHost > 0) + info.Capabilities.IGMPFilter = (val&CDPCapMaskIGMPFilter > 0) + info.Capabilities.L1Repeater = (val&CDPCapMaskRepeater > 0) + info.Capabilities.IsPhone = (val&CDPCapMaskPhone > 0) + info.Capabilities.RemotelyManaged = (val&CDPCapMaskRemote > 0) + case CDPTLVVersion: + info.Version = string(val.Value) + case CDPTLVPlatform: + info.Platform = string(val.Value) + case CDPTLVIPPrefix: + v := val.Value + l := len(v) + if l%5 == 0 && l >= 5 { + for len(v) > 0 { + _, ipnet, _ := net.ParseCIDR(fmt.Sprintf("%d.%d.%d.%d/%d", v[0], v[1], v[2], v[3], v[4])) + info.IPPrefixes = append(info.IPPrefixes, *ipnet) + v = v[5:] + } + } else { + return fmt.Errorf("Invalid TLV %v length %d", val.Type, len(val.Value)) + } + case CDPTLVHello: + if err = checkCDPTLVLen(val, 32); err != nil { + return err + } + v := val.Value + info.CDPHello.OUI = v[0:3] + info.CDPHello.ProtocolID = binary.BigEndian.Uint16(v[3:5]) + info.CDPHello.ClusterMaster = v[5:9] + info.CDPHello.Unknown1 = v[9:13] + info.CDPHello.Version = v[13] + info.CDPHello.SubVersion = v[14] + info.CDPHello.Status = v[15] + info.CDPHello.Unknown2 = v[16] + info.CDPHello.ClusterCommander = v[17:23] + info.CDPHello.SwitchMAC = v[23:29] + info.CDPHello.Unknown3 = v[29] + info.CDPHello.ManagementVLAN = binary.BigEndian.Uint16(v[30:32]) + case CDPTLVVTPDomain: + info.VTPDomain = string(val.Value) + case CDPTLVNativeVLAN: + if err = checkCDPTLVLen(val, 2); err != nil { + return err + } + info.NativeVLAN = binary.BigEndian.Uint16(val.Value[0:2]) + case CDPTLVFullDuplex: + if err = checkCDPTLVLen(val, 1); err != nil { + return err + } + info.FullDuplex = (val.Value[0] == 1) + case CDPTLVVLANReply: + if err = checkCDPTLVLen(val, 3); err != nil { + return err + } + info.VLANReply.ID = uint8(val.Value[0]) + info.VLANReply.VLAN = binary.BigEndian.Uint16(val.Value[1:3]) + case CDPTLVVLANQuery: + if err = checkCDPTLVLen(val, 3); err != nil { + return err + } + info.VLANQuery.ID = uint8(val.Value[0]) + info.VLANQuery.VLAN = binary.BigEndian.Uint16(val.Value[1:3]) + case CDPTLVPower: + if err = checkCDPTLVLen(val, 2); err != nil { + return err + } + info.PowerConsumption = binary.BigEndian.Uint16(val.Value[0:2]) + case CDPTLVMTU: + if err = checkCDPTLVLen(val, 4); err != nil { + return err + } + info.MTU = binary.BigEndian.Uint32(val.Value[0:4]) + case CDPTLVExtendedTrust: + if err = checkCDPTLVLen(val, 1); err != nil { + return err + } + info.ExtendedTrust = uint8(val.Value[0]) + case CDPTLVUntrustedCOS: + if err = checkCDPTLVLen(val, 1); err != nil { + return err + } + info.UntrustedCOS = uint8(val.Value[0]) + case CDPTLVSysName: + info.SysName = string(val.Value) + case CDPTLVSysOID: + info.SysOID = string(val.Value) + case CDPTLVMgmtAddresses: + if err = checkCDPTLVLen(val, 4); err 
!= nil { + return err + } + info.MgmtAddresses, err = decodeAddresses(val.Value) + if err != nil { + return err + } + case CDPTLVLocation: + if err = checkCDPTLVLen(val, 2); err != nil { + return err + } + info.Location.Type = uint8(val.Value[0]) + info.Location.Location = string(val.Value[1:]) + + // case CDPTLVLExternalPortID: + // Undocumented + case CDPTLVPowerRequested: + if err = checkCDPTLVLen(val, 4); err != nil { + return err + } + info.PowerRequest.ID = binary.BigEndian.Uint16(val.Value[0:2]) + info.PowerRequest.MgmtID = binary.BigEndian.Uint16(val.Value[2:4]) + for n := 4; n < len(val.Value); n += 4 { + info.PowerRequest.Values = append(info.PowerRequest.Values, binary.BigEndian.Uint32(val.Value[n:n+4])) + } + case CDPTLVPowerAvailable: + if err = checkCDPTLVLen(val, 4); err != nil { + return err + } + info.PowerAvailable.ID = binary.BigEndian.Uint16(val.Value[0:2]) + info.PowerAvailable.MgmtID = binary.BigEndian.Uint16(val.Value[2:4]) + for n := 4; n < len(val.Value); n += 4 { + info.PowerAvailable.Values = append(info.PowerAvailable.Values, binary.BigEndian.Uint32(val.Value[n:n+4])) + } + // case CDPTLVPortUnidirectional + // Undocumented + case CDPTLVEnergyWise: + if err = checkCDPTLVLen(val, 72); err != nil { + return err + } + info.EnergyWise.EncryptedData = val.Value[0:20] + info.EnergyWise.Unknown1 = binary.BigEndian.Uint32(val.Value[20:24]) + info.EnergyWise.SequenceNumber = binary.BigEndian.Uint32(val.Value[24:28]) + info.EnergyWise.ModelNumber = string(val.Value[28:44]) + info.EnergyWise.Unknown2 = binary.BigEndian.Uint16(val.Value[44:46]) + info.EnergyWise.HardwareID = string(val.Value[46:49]) + info.EnergyWise.SerialNum = string(val.Value[49:60]) + info.EnergyWise.Unknown3 = val.Value[60:68] + tlvLen := binary.BigEndian.Uint16(val.Value[68:70]) + tlvNum := binary.BigEndian.Uint16(val.Value[70:72]) + data := val.Value[72:] + if len(data) < int(tlvLen) { + return fmt.Errorf("Invalid TLV length %d vs %d", tlvLen, len(data)) + } + numSeen := 0 + for len(data) > 8 { + numSeen++ + if numSeen > int(tlvNum) { // Too many TLV's ? + return fmt.Errorf("Too many TLV's - wanted %d, saw %d", tlvNum, numSeen) + } + tType := CDPEnergyWiseSubtype(binary.BigEndian.Uint32(data[0:4])) + tLen := int(binary.BigEndian.Uint32(data[4:8])) + if tLen > len(data)-8 { + return fmt.Errorf("Invalid TLV length %d vs %d", tLen, len(data)-8) + } + data = data[8:] + switch tType { + case CDPEnergyWiseRole: + info.EnergyWise.Role = string(data[:]) + case CDPEnergyWiseDomain: + info.EnergyWise.Domain = string(data[:]) + case CDPEnergyWiseName: + info.EnergyWise.Name = string(data[:]) + case CDPEnergyWiseReplyTo: + if len(data) >= 18 { + info.EnergyWise.ReplyUnknown1 = data[0:2] + info.EnergyWise.ReplyPort = data[2:4] + info.EnergyWise.ReplyAddress = data[4:8] + info.EnergyWise.ReplyUnknown2 = data[8:10] + info.EnergyWise.ReplyUnknown3 = data[10:14] + } + } + data = data[tLen:] + } + case CDPTLVSparePairPOE: + if err = checkCDPTLVLen(val, 1); err != nil { + return err + } + v := val.Value[0] + info.SparePairPoe.PSEFourWire = (v&CDPPoEFourWire > 0) + info.SparePairPoe.PDArchShared = (v&CDPPoEPDArch > 0) + info.SparePairPoe.PDRequestOn = (v&CDPPoEPDRequest > 0) + info.SparePairPoe.PSEOn = (v&CDPPoEPSE > 0) + default: + info.Unknown = append(info.Unknown, val) + } + } + return nil +} + +// CDP Protocol Types +const ( + CDPProtocolTypeNLPID byte = 1 + CDPProtocolType802_2 byte = 2 +) + +// CDPAddressType is used to define TLV values within CDP addresses. +type CDPAddressType uint64 + +// CDP Address types. 
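Because of the two-stage decode above (raw TLVs first, then CiscoDiscoveryInfo), callers usually only touch the info layer. A small illustrative round trip with a hand-assembled minimal CDP frame (the device name, port and capability bits are made up; real frames arrive on the 01:00:0c:cc:cc:cc multicast MAC over LLC/SNAP):

```go
package main

import (
	"fmt"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	raw := []byte{
		2, 180, 0x00, 0x00, // version 2, TTL 180s, checksum (not verified on decode)
		0x00, 0x01, 0x00, 0x0c, 's', 'w', 'i', 't', 'c', 'h', '-', '1', // Device ID TLV
		0x00, 0x03, 0x00, 0x09, 'G', 'i', '0', '/', '1', // Port ID TLV
		0x00, 0x04, 0x00, 0x08, 0x00, 0x00, 0x00, 0x08, // Capabilities TLV: switch bit
	}

	pkt := gopacket.NewPacket(raw, layers.LayerTypeCiscoDiscovery, gopacket.Default)
	if info, ok := pkt.Layer(layers.LayerTypeCiscoDiscoveryInfo).(*layers.CiscoDiscoveryInfo); ok {
		fmt.Println(info.DeviceID, info.PortID, info.Capabilities.L2Switch) // switch-1 Gi0/1 true
	}
}
```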
+const ( + CDPAddressTypeCLNP CDPAddressType = 0x81 + CDPAddressTypeIPV4 CDPAddressType = 0xcc + CDPAddressTypeIPV6 CDPAddressType = 0xaaaa030000000800 + CDPAddressTypeDECNET CDPAddressType = 0xaaaa030000006003 + CDPAddressTypeAPPLETALK CDPAddressType = 0xaaaa03000000809b + CDPAddressTypeIPX CDPAddressType = 0xaaaa030000008137 + CDPAddressTypeVINES CDPAddressType = 0xaaaa0300000080c4 + CDPAddressTypeXNS CDPAddressType = 0xaaaa030000000600 + CDPAddressTypeAPOLLO CDPAddressType = 0xaaaa030000008019 +) + +func decodeAddresses(v []byte) (addresses []net.IP, err error) { + numaddr := int(binary.BigEndian.Uint32(v[0:4])) + if numaddr < 1 { + return nil, fmt.Errorf("Invalid Address TLV number %d", numaddr) + } + v = v[4:] + if len(v) < numaddr*8 { + return nil, fmt.Errorf("Invalid Address TLV length %d", len(v)) + } + for i := 0; i < numaddr; i++ { + prottype := v[0] + if prottype != CDPProtocolTypeNLPID && prottype != CDPProtocolType802_2 { // invalid protocol type + return nil, fmt.Errorf("Invalid Address Protocol %d", prottype) + } + protlen := int(v[1]) + if (prottype == CDPProtocolTypeNLPID && protlen != 1) || + (prottype == CDPProtocolType802_2 && protlen != 3 && protlen != 8) { // invalid length + return nil, fmt.Errorf("Invalid Address Protocol length %d", protlen) + } + plen := make([]byte, 8) + copy(plen[8-protlen:], v[2:2+protlen]) + protocol := CDPAddressType(binary.BigEndian.Uint64(plen)) + v = v[2+protlen:] + addrlen := binary.BigEndian.Uint16(v[0:2]) + ab := v[2 : 2+addrlen] + if protocol == CDPAddressTypeIPV4 && addrlen == 4 { + addresses = append(addresses, net.IPv4(ab[0], ab[1], ab[2], ab[3])) + } else if protocol == CDPAddressTypeIPV6 && addrlen == 16 { + addresses = append(addresses, net.IP(ab)) + } else { + // only handle IPV4 & IPV6 for now + } + v = v[2+addrlen:] + if len(v) < 8 { + break + } + } + return +} + +func (t CDPTLVType) String() (s string) { + switch t { + case CDPTLVDevID: + s = "Device ID" + case CDPTLVAddress: + s = "Addresses" + case CDPTLVPortID: + s = "Port ID" + case CDPTLVCapabilities: + s = "Capabilities" + case CDPTLVVersion: + s = "Software Version" + case CDPTLVPlatform: + s = "Platform" + case CDPTLVIPPrefix: + s = "IP Prefix" + case CDPTLVHello: + s = "Protocol Hello" + case CDPTLVVTPDomain: + s = "VTP Management Domain" + case CDPTLVNativeVLAN: + s = "Native VLAN" + case CDPTLVFullDuplex: + s = "Full Duplex" + case CDPTLVVLANReply: + s = "VoIP VLAN Reply" + case CDPTLVVLANQuery: + s = "VLANQuery" + case CDPTLVPower: + s = "Power consumption" + case CDPTLVMTU: + s = "MTU" + case CDPTLVExtendedTrust: + s = "Extended Trust Bitmap" + case CDPTLVUntrustedCOS: + s = "Untrusted Port CoS" + case CDPTLVSysName: + s = "System Name" + case CDPTLVSysOID: + s = "System OID" + case CDPTLVMgmtAddresses: + s = "Management Addresses" + case CDPTLVLocation: + s = "Location" + case CDPTLVExternalPortID: + s = "External Port ID" + case CDPTLVPowerRequested: + s = "Power Requested" + case CDPTLVPowerAvailable: + s = "Power Available" + case CDPTLVPortUnidirectional: + s = "Port Unidirectional" + case CDPTLVEnergyWise: + s = "Energy Wise" + case CDPTLVSparePairPOE: + s = "Spare Pair POE" + default: + s = "Unknown" + } + return +} + +func (a CDPAddressType) String() (s string) { + switch a { + case CDPAddressTypeCLNP: + s = "Connectionless Network Protocol" + case CDPAddressTypeIPV4: + s = "IPv4" + case CDPAddressTypeIPV6: + s = "IPv6" + case CDPAddressTypeDECNET: + s = "DECnet Phase IV" + case CDPAddressTypeAPPLETALK: + s = "Apple Talk" + case CDPAddressTypeIPX: + s = 
"Novell IPX" + case CDPAddressTypeVINES: + s = "Banyan VINES" + case CDPAddressTypeXNS: + s = "Xerox Network Systems" + case CDPAddressTypeAPOLLO: + s = "Apollo" + default: + s = "Unknown" + } + return +} + +func (t CDPEnergyWiseSubtype) String() (s string) { + switch t { + case CDPEnergyWiseRole: + s = "Role" + case CDPEnergyWiseDomain: + s = "Domain" + case CDPEnergyWiseName: + s = "Name" + case CDPEnergyWiseReplyTo: + s = "ReplyTo" + default: + s = "Unknown" + } + return +} + +func checkCDPTLVLen(v CiscoDiscoveryValue, l int) (err error) { + if len(v.Value) < l { + err = fmt.Errorf("Invalid TLV %v length %d", v.Type, len(v.Value)) + } + return +} diff --git a/vendor/github.com/google/gopacket/layers/ctp.go b/vendor/github.com/google/gopacket/layers/ctp.go new file mode 100644 index 0000000000..82875845a7 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/ctp.go @@ -0,0 +1,109 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "fmt" + "github.com/google/gopacket" +) + +// EthernetCTPFunction is the function code used by the EthernetCTP protocol to identify each +// EthernetCTP layer. +type EthernetCTPFunction uint16 + +// EthernetCTPFunction values. +const ( + EthernetCTPFunctionReply EthernetCTPFunction = 1 + EthernetCTPFunctionForwardData EthernetCTPFunction = 2 +) + +// EthernetCTP implements the EthernetCTP protocol, see http://www.mit.edu/people/jhawk/ctp.html. +// We split EthernetCTP up into the top-level EthernetCTP layer, followed by zero or more +// EthernetCTPForwardData layers, followed by a final EthernetCTPReply layer. +type EthernetCTP struct { + BaseLayer + SkipCount uint16 +} + +// LayerType returns gopacket.LayerTypeEthernetCTP. +func (c *EthernetCTP) LayerType() gopacket.LayerType { + return LayerTypeEthernetCTP +} + +// EthernetCTPForwardData is the ForwardData layer inside EthernetCTP. See EthernetCTP's docs for more +// details. +type EthernetCTPForwardData struct { + BaseLayer + Function EthernetCTPFunction + ForwardAddress []byte +} + +// LayerType returns gopacket.LayerTypeEthernetCTPForwardData. +func (c *EthernetCTPForwardData) LayerType() gopacket.LayerType { + return LayerTypeEthernetCTPForwardData +} + +// ForwardEndpoint returns the EthernetCTPForwardData ForwardAddress as an endpoint. +func (c *EthernetCTPForwardData) ForwardEndpoint() gopacket.Endpoint { + return gopacket.NewEndpoint(EndpointMAC, c.ForwardAddress) +} + +// EthernetCTPReply is the Reply layer inside EthernetCTP. See EthernetCTP's docs for more details. +type EthernetCTPReply struct { + BaseLayer + Function EthernetCTPFunction + ReceiptNumber uint16 + Data []byte +} + +// LayerType returns gopacket.LayerTypeEthernetCTPReply. +func (c *EthernetCTPReply) LayerType() gopacket.LayerType { + return LayerTypeEthernetCTPReply +} + +// Payload returns the EthernetCTP reply's Data bytes. 
+func (c *EthernetCTPReply) Payload() []byte { return c.Data } + +func decodeEthernetCTP(data []byte, p gopacket.PacketBuilder) error { + c := &EthernetCTP{ + SkipCount: binary.LittleEndian.Uint16(data[:2]), + BaseLayer: BaseLayer{data[:2], data[2:]}, + } + if c.SkipCount%2 != 0 { + return fmt.Errorf("EthernetCTP skip count is odd: %d", c.SkipCount) + } + p.AddLayer(c) + return p.NextDecoder(gopacket.DecodeFunc(decodeEthernetCTPFromFunctionType)) +} + +// decodeEthernetCTPFromFunctionType reads in the first 2 bytes to determine the EthernetCTP +// layer type to decode next, then decodes based on that. +func decodeEthernetCTPFromFunctionType(data []byte, p gopacket.PacketBuilder) error { + function := EthernetCTPFunction(binary.LittleEndian.Uint16(data[:2])) + switch function { + case EthernetCTPFunctionReply: + reply := &EthernetCTPReply{ + Function: function, + ReceiptNumber: binary.LittleEndian.Uint16(data[2:4]), + Data: data[4:], + BaseLayer: BaseLayer{data, nil}, + } + p.AddLayer(reply) + p.SetApplicationLayer(reply) + return nil + case EthernetCTPFunctionForwardData: + forward := &EthernetCTPForwardData{ + Function: function, + ForwardAddress: data[2:8], + BaseLayer: BaseLayer{data[:8], data[8:]}, + } + p.AddLayer(forward) + return p.NextDecoder(gopacket.DecodeFunc(decodeEthernetCTPFromFunctionType)) + } + return fmt.Errorf("Unknown EthernetCTP function type %v", function) +} diff --git a/vendor/github.com/google/gopacket/layers/dhcpv4.go b/vendor/github.com/google/gopacket/layers/dhcpv4.go new file mode 100644 index 0000000000..d79c591504 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/dhcpv4.go @@ -0,0 +1,592 @@ +// Copyright 2016 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "bytes" + "encoding/binary" + "fmt" + "net" + + "github.com/google/gopacket" +) + +// DHCPOp rerprents a bootp operation +type DHCPOp byte + +// bootp operations +const ( + DHCPOpRequest DHCPOp = 1 + DHCPOpReply DHCPOp = 2 +) + +// String returns a string version of a DHCPOp. +func (o DHCPOp) String() string { + switch o { + case DHCPOpRequest: + return "Request" + case DHCPOpReply: + return "Reply" + default: + return "Unknown" + } +} + +// DHCPMsgType represents a DHCP operation +type DHCPMsgType byte + +// Constants that represent DHCP operations +const ( + DHCPMsgTypeUnspecified DHCPMsgType = iota + DHCPMsgTypeDiscover + DHCPMsgTypeOffer + DHCPMsgTypeRequest + DHCPMsgTypeDecline + DHCPMsgTypeAck + DHCPMsgTypeNak + DHCPMsgTypeRelease + DHCPMsgTypeInform +) + +// String returns a string version of a DHCPMsgType. +func (o DHCPMsgType) String() string { + switch o { + case DHCPMsgTypeUnspecified: + return "Unspecified" + case DHCPMsgTypeDiscover: + return "Discover" + case DHCPMsgTypeOffer: + return "Offer" + case DHCPMsgTypeRequest: + return "Request" + case DHCPMsgTypeDecline: + return "Decline" + case DHCPMsgTypeAck: + return "Ack" + case DHCPMsgTypeNak: + return "Nak" + case DHCPMsgTypeRelease: + return "Release" + case DHCPMsgTypeInform: + return "Inform" + default: + return "Unknown" + } +} + +//DHCPMagic is the RFC 2131 "magic cooke" for DHCP. +var DHCPMagic uint32 = 0x63825363 + +// DHCPv4 contains data for a single DHCP packet. 
+type DHCPv4 struct { + BaseLayer + Operation DHCPOp + HardwareType LinkType + HardwareLen uint8 + HardwareOpts uint8 + Xid uint32 + Secs uint16 + Flags uint16 + ClientIP net.IP + YourClientIP net.IP + NextServerIP net.IP + RelayAgentIP net.IP + ClientHWAddr net.HardwareAddr + ServerName []byte + File []byte + Options DHCPOptions +} + +// DHCPOptions is used to get nicely printed option lists which would normally +// be cut off after 5 options. +type DHCPOptions []DHCPOption + +// String returns a string version of the options list. +func (o DHCPOptions) String() string { + buf := &bytes.Buffer{} + buf.WriteByte('[') + for i, opt := range o { + buf.WriteString(opt.String()) + if i+1 != len(o) { + buf.WriteString(", ") + } + } + buf.WriteByte(']') + return buf.String() +} + +// LayerType returns gopacket.LayerTypeDHCPv4 +func (d *DHCPv4) LayerType() gopacket.LayerType { return LayerTypeDHCPv4 } + +// DecodeFromBytes decodes the given bytes into this layer. +func (d *DHCPv4) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 240 { + df.SetTruncated() + return fmt.Errorf("DHCPv4 length %d too short", len(data)) + } + d.Options = d.Options[:0] + d.Operation = DHCPOp(data[0]) + d.HardwareType = LinkType(data[1]) + d.HardwareLen = data[2] + d.HardwareOpts = data[3] + d.Xid = binary.BigEndian.Uint32(data[4:8]) + d.Secs = binary.BigEndian.Uint16(data[8:10]) + d.Flags = binary.BigEndian.Uint16(data[10:12]) + d.ClientIP = net.IP(data[12:16]) + d.YourClientIP = net.IP(data[16:20]) + d.NextServerIP = net.IP(data[20:24]) + d.RelayAgentIP = net.IP(data[24:28]) + d.ClientHWAddr = net.HardwareAddr(data[28 : 28+d.HardwareLen]) + d.ServerName = data[44:108] + d.File = data[108:236] + if binary.BigEndian.Uint32(data[236:240]) != DHCPMagic { + return InvalidMagicCookie + } + + if len(data) <= 240 { + // DHCP Packet could have no option (??) + return nil + } + + options := data[240:] + + stop := len(options) + start := 0 + for start < stop { + o := DHCPOption{} + if err := o.decode(options[start:]); err != nil { + return err + } + if o.Type == DHCPOptEnd { + break + } + d.Options = append(d.Options, o) + // Check if the option is a single byte pad + if o.Type == DHCPOptPad { + start++ + } else { + start += int(o.Length) + 2 + } + } + + d.Contents = data + + return nil +} + +// Len returns the length of a DHCPv4 packet. +func (d *DHCPv4) Len() uint16 { + n := uint16(240) + for _, o := range d.Options { + if o.Type == DHCPOptPad { + n++ + } else { + n += uint16(o.Length) + 2 + } + } + n++ // for opt end + return n +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. 
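As a usage sketch for the DecodeFromBytes method above: given the raw BOOTP/DHCP payload (for example the UDP payload of a port 67/68 datagram), the layer can be decoded standalone and its options inspected. Illustrative only; payload is assumed to hold a complete message, and gopacket.NilDecodeFeedback is the library's no-op DecodeFeedback value:

    dhcp := &layers.DHCPv4{}
    if err := dhcp.DecodeFromBytes(payload, gopacket.NilDecodeFeedback); err != nil {
        return err
    }
    fmt.Println("op:", dhcp.Operation, "xid:", dhcp.Xid, "client MAC:", dhcp.ClientHWAddr)
    for _, opt := range dhcp.Options {
        if opt.Type == layers.DHCPOptMessageType && len(opt.Data) == 1 {
            fmt.Println("message type:", layers.DHCPMsgType(opt.Data[0])) // e.g. Discover, Offer, Ack
        }
    }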
+func (d *DHCPv4) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + plen := int(d.Len()) + + data, err := b.PrependBytes(plen) + if err != nil { + return err + } + + data[0] = byte(d.Operation) + data[1] = byte(d.HardwareType) + if opts.FixLengths { + d.HardwareLen = uint8(len(d.ClientHWAddr)) + } + data[2] = d.HardwareLen + data[3] = d.HardwareOpts + binary.BigEndian.PutUint32(data[4:8], d.Xid) + binary.BigEndian.PutUint16(data[8:10], d.Secs) + binary.BigEndian.PutUint16(data[10:12], d.Flags) + copy(data[12:16], d.ClientIP.To4()) + copy(data[16:20], d.YourClientIP.To4()) + copy(data[20:24], d.NextServerIP.To4()) + copy(data[24:28], d.RelayAgentIP.To4()) + copy(data[28:44], d.ClientHWAddr) + copy(data[44:108], d.ServerName) + copy(data[108:236], d.File) + binary.BigEndian.PutUint32(data[236:240], DHCPMagic) + + if len(d.Options) > 0 { + offset := 240 + for _, o := range d.Options { + if err := o.encode(data[offset:]); err != nil { + return err + } + // A pad option is only a single byte + if o.Type == DHCPOptPad { + offset++ + } else { + offset += 2 + len(o.Data) + } + } + optend := NewDHCPOption(DHCPOptEnd, nil) + if err := optend.encode(data[offset:]); err != nil { + return err + } + } + return nil +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (d *DHCPv4) CanDecode() gopacket.LayerClass { + return LayerTypeDHCPv4 +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (d *DHCPv4) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypePayload +} + +func decodeDHCPv4(data []byte, p gopacket.PacketBuilder) error { + dhcp := &DHCPv4{} + err := dhcp.DecodeFromBytes(data, p) + if err != nil { + return err + } + p.AddLayer(dhcp) + return p.NextDecoder(gopacket.LayerTypePayload) +} + +// DHCPOpt represents a DHCP option or parameter from RFC-2132 +type DHCPOpt byte + +// Constants for the DHCPOpt options. 
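The SerializeTo method above is the inverse of DecodeFromBytes; together with the NewDHCPOption constructor added further down in this file it can build a message from scratch. A minimal sketch of a DISCOVER (illustrative, assuming the standard gopacket serialize-buffer API; FixLengths lets SerializeTo derive the hardware-address length):

    dhcp := &layers.DHCPv4{
        Operation:    layers.DHCPOpRequest,
        HardwareType: layers.LinkTypeEthernet,
        Xid:          0x12345678,
        ClientIP:     net.IPv4zero,
        YourClientIP: net.IPv4zero,
        NextServerIP: net.IPv4zero,
        RelayAgentIP: net.IPv4zero,
        ClientHWAddr: net.HardwareAddr{0xde, 0xad, 0xbe, 0xef, 0x00, 0x01},
        Options: layers.DHCPOptions{
            layers.NewDHCPOption(layers.DHCPOptMessageType, []byte{byte(layers.DHCPMsgTypeDiscover)}),
        },
    }
    buf := gopacket.NewSerializeBuffer()
    if err := dhcp.SerializeTo(buf, gopacket.SerializeOptions{FixLengths: true}); err != nil {
        return err
    }
    fmt.Printf("serialized %d bytes\n", len(buf.Bytes())) // 240-byte fixed header, options, end option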
+const ( + DHCPOptPad DHCPOpt = 0 + DHCPOptSubnetMask DHCPOpt = 1 // 4, net.IP + DHCPOptTimeOffset DHCPOpt = 2 // 4, int32 (signed seconds from UTC) + DHCPOptRouter DHCPOpt = 3 // n*4, [n]net.IP + DHCPOptTimeServer DHCPOpt = 4 // n*4, [n]net.IP + DHCPOptNameServer DHCPOpt = 5 // n*4, [n]net.IP + DHCPOptDNS DHCPOpt = 6 // n*4, [n]net.IP + DHCPOptLogServer DHCPOpt = 7 // n*4, [n]net.IP + DHCPOptCookieServer DHCPOpt = 8 // n*4, [n]net.IP + DHCPOptLPRServer DHCPOpt = 9 // n*4, [n]net.IP + DHCPOptImpressServer DHCPOpt = 10 // n*4, [n]net.IP + DHCPOptResLocServer DHCPOpt = 11 // n*4, [n]net.IP + DHCPOptHostname DHCPOpt = 12 // n, string + DHCPOptBootfileSize DHCPOpt = 13 // 2, uint16 + DHCPOptMeritDumpFile DHCPOpt = 14 // >1, string + DHCPOptDomainName DHCPOpt = 15 // n, string + DHCPOptSwapServer DHCPOpt = 16 // n*4, [n]net.IP + DHCPOptRootPath DHCPOpt = 17 // n, string + DHCPOptExtensionsPath DHCPOpt = 18 // n, string + DHCPOptIPForwarding DHCPOpt = 19 // 1, bool + DHCPOptSourceRouting DHCPOpt = 20 // 1, bool + DHCPOptPolicyFilter DHCPOpt = 21 // 8*n, [n]{net.IP/net.IP} + DHCPOptDatagramMTU DHCPOpt = 22 // 2, uint16 + DHCPOptDefaultTTL DHCPOpt = 23 // 1, byte + DHCPOptPathMTUAgingTimeout DHCPOpt = 24 // 4, uint32 + DHCPOptPathPlateuTableOption DHCPOpt = 25 // 2*n, []uint16 + DHCPOptInterfaceMTU DHCPOpt = 26 // 2, uint16 + DHCPOptAllSubsLocal DHCPOpt = 27 // 1, bool + DHCPOptBroadcastAddr DHCPOpt = 28 // 4, net.IP + DHCPOptMaskDiscovery DHCPOpt = 29 // 1, bool + DHCPOptMaskSupplier DHCPOpt = 30 // 1, bool + DHCPOptRouterDiscovery DHCPOpt = 31 // 1, bool + DHCPOptSolicitAddr DHCPOpt = 32 // 4, net.IP + DHCPOptStaticRoute DHCPOpt = 33 // n*8, [n]{net.IP/net.IP} -- note the 2nd is router not mask + DHCPOptARPTrailers DHCPOpt = 34 // 1, bool + DHCPOptARPTimeout DHCPOpt = 35 // 4, uint32 + DHCPOptEthernetEncap DHCPOpt = 36 // 1, bool + DHCPOptTCPTTL DHCPOpt = 37 // 1, byte + DHCPOptTCPKeepAliveInt DHCPOpt = 38 // 4, uint32 + DHCPOptTCPKeepAliveGarbage DHCPOpt = 39 // 1, bool + DHCPOptNISDomain DHCPOpt = 40 // n, string + DHCPOptNISServers DHCPOpt = 41 // 4*n, [n]net.IP + DHCPOptNTPServers DHCPOpt = 42 // 4*n, [n]net.IP + DHCPOptVendorOption DHCPOpt = 43 // n, [n]byte // may be encapsulated. + DHCPOptNetBIOSTCPNS DHCPOpt = 44 // 4*n, [n]net.IP + DHCPOptNetBIOSTCPDDS DHCPOpt = 45 // 4*n, [n]net.IP + DHCPOptNETBIOSTCPNodeType DHCPOpt = 46 // 1, magic byte + DHCPOptNetBIOSTCPScope DHCPOpt = 47 // n, string + DHCPOptXFontServer DHCPOpt = 48 // n, string + DHCPOptXDisplayManager DHCPOpt = 49 // n, string + DHCPOptRequestIP DHCPOpt = 50 // 4, net.IP + DHCPOptLeaseTime DHCPOpt = 51 // 4, uint32 + DHCPOptExtOptions DHCPOpt = 52 // 1, 1/2/3 + DHCPOptMessageType DHCPOpt = 53 // 1, 1-7 + DHCPOptServerID DHCPOpt = 54 // 4, net.IP + DHCPOptParamsRequest DHCPOpt = 55 // n, []byte + DHCPOptMessage DHCPOpt = 56 // n, 3 + DHCPOptMaxMessageSize DHCPOpt = 57 // 2, uint16 + DHCPOptT1 DHCPOpt = 58 // 4, uint32 + DHCPOptT2 DHCPOpt = 59 // 4, uint32 + DHCPOptClassID DHCPOpt = 60 // n, []byte + DHCPOptClientID DHCPOpt = 61 // n >= 2, []byte + DHCPOptDomainSearch DHCPOpt = 119 // n, string + DHCPOptSIPServers DHCPOpt = 120 // n, url + DHCPOptClasslessStaticRoute DHCPOpt = 121 // + DHCPOptEnd DHCPOpt = 255 +) + +// String returns a string version of a DHCPOpt. 
+func (o DHCPOpt) String() string { + switch o { + case DHCPOptPad: + return "(padding)" + case DHCPOptSubnetMask: + return "SubnetMask" + case DHCPOptTimeOffset: + return "TimeOffset" + case DHCPOptRouter: + return "Router" + case DHCPOptTimeServer: + return "rfc868" // old time server protocol stringified to dissuade confusion w. NTP + case DHCPOptNameServer: + return "ien116" // obscure nameserver protocol stringified to dissuade confusion w. DNS + case DHCPOptDNS: + return "DNS" + case DHCPOptLogServer: + return "mitLCS" // MIT LCS server protocol yada yada w. Syslog + case DHCPOptCookieServer: + return "CookieServer" + case DHCPOptLPRServer: + return "LPRServer" + case DHCPOptImpressServer: + return "ImpressServer" + case DHCPOptResLocServer: + return "ResourceLocationServer" + case DHCPOptHostname: + return "Hostname" + case DHCPOptBootfileSize: + return "BootfileSize" + case DHCPOptMeritDumpFile: + return "MeritDumpFile" + case DHCPOptDomainName: + return "DomainName" + case DHCPOptSwapServer: + return "SwapServer" + case DHCPOptRootPath: + return "RootPath" + case DHCPOptExtensionsPath: + return "ExtensionsPath" + case DHCPOptIPForwarding: + return "IPForwarding" + case DHCPOptSourceRouting: + return "SourceRouting" + case DHCPOptPolicyFilter: + return "PolicyFilter" + case DHCPOptDatagramMTU: + return "DatagramMTU" + case DHCPOptDefaultTTL: + return "DefaultTTL" + case DHCPOptPathMTUAgingTimeout: + return "PathMTUAgingTimeout" + case DHCPOptPathPlateuTableOption: + return "PathPlateuTableOption" + case DHCPOptInterfaceMTU: + return "InterfaceMTU" + case DHCPOptAllSubsLocal: + return "AllSubsLocal" + case DHCPOptBroadcastAddr: + return "BroadcastAddress" + case DHCPOptMaskDiscovery: + return "MaskDiscovery" + case DHCPOptMaskSupplier: + return "MaskSupplier" + case DHCPOptRouterDiscovery: + return "RouterDiscovery" + case DHCPOptSolicitAddr: + return "SolicitAddr" + case DHCPOptStaticRoute: + return "StaticRoute" + case DHCPOptARPTrailers: + return "ARPTrailers" + case DHCPOptARPTimeout: + return "ARPTimeout" + case DHCPOptEthernetEncap: + return "EthernetEncap" + case DHCPOptTCPTTL: + return "TCPTTL" + case DHCPOptTCPKeepAliveInt: + return "TCPKeepAliveInt" + case DHCPOptTCPKeepAliveGarbage: + return "TCPKeepAliveGarbage" + case DHCPOptNISDomain: + return "NISDomain" + case DHCPOptNISServers: + return "NISServers" + case DHCPOptNTPServers: + return "NTPServers" + case DHCPOptVendorOption: + return "VendorOption" + case DHCPOptNetBIOSTCPNS: + return "NetBIOSOverTCPNS" + case DHCPOptNetBIOSTCPDDS: + return "NetBiosOverTCPDDS" + case DHCPOptNETBIOSTCPNodeType: + return "NetBIOSOverTCPNodeType" + case DHCPOptNetBIOSTCPScope: + return "NetBIOSOverTCPScope" + case DHCPOptXFontServer: + return "XFontServer" + case DHCPOptXDisplayManager: + return "XDisplayManager" + case DHCPOptEnd: + return "(end)" + case DHCPOptSIPServers: + return "SipServers" + case DHCPOptRequestIP: + return "RequestIP" + case DHCPOptLeaseTime: + return "LeaseTime" + case DHCPOptExtOptions: + return "ExtOpts" + case DHCPOptMessageType: + return "MessageType" + case DHCPOptServerID: + return "ServerID" + case DHCPOptParamsRequest: + return "ParamsRequest" + case DHCPOptMessage: + return "Message" + case DHCPOptMaxMessageSize: + return "MaxDHCPSize" + case DHCPOptT1: + return "Timer1" + case DHCPOptT2: + return "Timer2" + case DHCPOptClassID: + return "ClassID" + case DHCPOptClientID: + return "ClientID" + case DHCPOptDomainSearch: + return "DomainSearch" + case DHCPOptClasslessStaticRoute: + return 
"ClasslessStaticRoute" + default: + return "Unknown" + } +} + +// DHCPOption rerpresents a DHCP option. +type DHCPOption struct { + Type DHCPOpt + Length uint8 + Data []byte +} + +// String returns a string version of a DHCP Option. +func (o DHCPOption) String() string { + switch o.Type { + + case DHCPOptHostname, DHCPOptMeritDumpFile, DHCPOptDomainName, DHCPOptRootPath, + DHCPOptExtensionsPath, DHCPOptNISDomain, DHCPOptNetBIOSTCPScope, DHCPOptXFontServer, + DHCPOptXDisplayManager, DHCPOptMessage, DHCPOptDomainSearch: // string + return fmt.Sprintf("Option(%s:%s)", o.Type, string(o.Data)) + + case DHCPOptMessageType: + if len(o.Data) != 1 { + return fmt.Sprintf("Option(%s:INVALID)", o.Type) + } + return fmt.Sprintf("Option(%s:%s)", o.Type, DHCPMsgType(o.Data[0])) + + case DHCPOptSubnetMask, DHCPOptServerID, DHCPOptBroadcastAddr, + DHCPOptSolicitAddr, DHCPOptRequestIP: // net.IP + if len(o.Data) < 4 { + return fmt.Sprintf("Option(%s:INVALID)", o.Type) + } + return fmt.Sprintf("Option(%s:%s)", o.Type, net.IP(o.Data)) + + case DHCPOptT1, DHCPOptT2, DHCPOptLeaseTime, DHCPOptPathMTUAgingTimeout, + DHCPOptARPTimeout, DHCPOptTCPKeepAliveInt: // uint32 + if len(o.Data) != 4 { + return fmt.Sprintf("Option(%s:INVALID)", o.Type) + } + return fmt.Sprintf("Option(%s:%d)", o.Type, + uint32(o.Data[0])<<24|uint32(o.Data[1])<<16|uint32(o.Data[2])<<8|uint32(o.Data[3])) + + case DHCPOptParamsRequest: + buf := &bytes.Buffer{} + buf.WriteString(fmt.Sprintf("Option(%s:", o.Type)) + for i, v := range o.Data { + buf.WriteString(DHCPOpt(v).String()) + if i+1 != len(o.Data) { + buf.WriteByte(',') + } + } + buf.WriteString(")") + return buf.String() + + default: + return fmt.Sprintf("Option(%s:%v)", o.Type, o.Data) + } +} + +// NewDHCPOption constructs a new DHCPOption with a given type and data. +func NewDHCPOption(t DHCPOpt, data []byte) DHCPOption { + o := DHCPOption{Type: t} + if data != nil { + o.Data = data + o.Length = uint8(len(data)) + } + return o +} + +func (o *DHCPOption) encode(b []byte) error { + switch o.Type { + case DHCPOptPad, DHCPOptEnd: + b[0] = byte(o.Type) + default: + b[0] = byte(o.Type) + b[1] = o.Length + copy(b[2:], o.Data) + } + return nil +} + +func (o *DHCPOption) decode(data []byte) error { + if len(data) < 1 { + // Pad/End have a length of 1 + return DecOptionNotEnoughData + } + o.Type = DHCPOpt(data[0]) + switch o.Type { + case DHCPOptPad, DHCPOptEnd: + o.Data = nil + default: + if len(data) < 2 { + return DecOptionNotEnoughData + } + o.Length = data[1] + if int(o.Length) > len(data[2:]) { + return DecOptionMalformed + } + o.Data = data[2 : 2+int(o.Length)] + } + return nil +} + +// DHCPv4Error is used for constant errors for DHCPv4. It is needed for test asserts. +type DHCPv4Error string + +// DHCPv4Error implements error interface. 
+func (d DHCPv4Error) Error() string { + return string(d) +} + +const ( + // DecOptionNotEnoughData is returned when there is not enough data during option's decode process + DecOptionNotEnoughData = DHCPv4Error("Not enough data to decode") + // DecOptionMalformed is returned when the option is malformed + DecOptionMalformed = DHCPv4Error("Option is malformed") + // InvalidMagicCookie is returned when Magic cookie is missing into BOOTP header + InvalidMagicCookie = DHCPv4Error("Bad DHCP header") +) diff --git a/vendor/github.com/google/gopacket/layers/dhcpv6.go b/vendor/github.com/google/gopacket/layers/dhcpv6.go new file mode 100644 index 0000000000..2698cfb196 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/dhcpv6.go @@ -0,0 +1,360 @@ +// Copyright 2018 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "fmt" + "net" + + "github.com/google/gopacket" +) + +// DHCPv6MsgType represents a DHCPv6 operation +type DHCPv6MsgType byte + +// Constants that represent DHCP operations +const ( + DHCPv6MsgTypeUnspecified DHCPv6MsgType = iota + DHCPv6MsgTypeSolicit + DHCPv6MsgTypeAdverstise + DHCPv6MsgTypeRequest + DHCPv6MsgTypeConfirm + DHCPv6MsgTypeRenew + DHCPv6MsgTypeRebind + DHCPv6MsgTypeReply + DHCPv6MsgTypeRelease + DHCPv6MsgTypeDecline + DHCPv6MsgTypeReconfigure + DHCPv6MsgTypeInformationRequest + DHCPv6MsgTypeRelayForward + DHCPv6MsgTypeRelayReply +) + +// String returns a string version of a DHCPv6MsgType. +func (o DHCPv6MsgType) String() string { + switch o { + case DHCPv6MsgTypeUnspecified: + return "Unspecified" + case DHCPv6MsgTypeSolicit: + return "Solicit" + case DHCPv6MsgTypeAdverstise: + return "Adverstise" + case DHCPv6MsgTypeRequest: + return "Request" + case DHCPv6MsgTypeConfirm: + return "Confirm" + case DHCPv6MsgTypeRenew: + return "Renew" + case DHCPv6MsgTypeRebind: + return "Rebind" + case DHCPv6MsgTypeReply: + return "Reply" + case DHCPv6MsgTypeRelease: + return "Release" + case DHCPv6MsgTypeDecline: + return "Decline" + case DHCPv6MsgTypeReconfigure: + return "Reconfigure" + case DHCPv6MsgTypeInformationRequest: + return "InformationRequest" + case DHCPv6MsgTypeRelayForward: + return "RelayForward" + case DHCPv6MsgTypeRelayReply: + return "RelayReply" + default: + return "Unknown" + } +} + +// DHCPv6 contains data for a single DHCP packet. +type DHCPv6 struct { + BaseLayer + MsgType DHCPv6MsgType + HopCount uint8 + LinkAddr net.IP + PeerAddr net.IP + TransactionID []byte + Options DHCPv6Options +} + +// LayerType returns gopacket.LayerTypeDHCPv6 +func (d *DHCPv6) LayerType() gopacket.LayerType { return LayerTypeDHCPv6 } + +// DecodeFromBytes decodes the given bytes into this layer. 
+func (d *DHCPv6) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 4 { + df.SetTruncated() + return fmt.Errorf("DHCPv6 length %d too short", len(data)) + } + d.BaseLayer = BaseLayer{Contents: data} + d.Options = d.Options[:0] + d.MsgType = DHCPv6MsgType(data[0]) + + offset := 0 + if d.MsgType == DHCPv6MsgTypeRelayForward || d.MsgType == DHCPv6MsgTypeRelayReply { + if len(data) < 34 { + df.SetTruncated() + return fmt.Errorf("DHCPv6 length %d too short for message type %d", len(data), d.MsgType) + } + d.HopCount = data[1] + d.LinkAddr = net.IP(data[2:18]) + d.PeerAddr = net.IP(data[18:34]) + offset = 34 + } else { + d.TransactionID = data[1:4] + offset = 4 + } + + stop := len(data) + for offset < stop { + o := DHCPv6Option{} + if err := o.decode(data[offset:]); err != nil { + return err + } + d.Options = append(d.Options, o) + offset += int(o.Length) + 4 // 2 from option code, 2 from option length + } + + return nil +} + +// Len returns the length of a DHCPv6 packet. +func (d *DHCPv6) Len() int { + n := 1 + if d.MsgType == DHCPv6MsgTypeRelayForward || d.MsgType == DHCPv6MsgTypeRelayReply { + n += 33 + } else { + n += 3 + } + + for _, o := range d.Options { + n += int(o.Length) + 4 // 2 from option code, 2 from option length + } + + return n +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (d *DHCPv6) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + plen := int(d.Len()) + + data, err := b.PrependBytes(plen) + if err != nil { + return err + } + + offset := 0 + data[0] = byte(d.MsgType) + if d.MsgType == DHCPv6MsgTypeRelayForward || d.MsgType == DHCPv6MsgTypeRelayReply { + data[1] = byte(d.HopCount) + copy(data[2:18], d.LinkAddr.To16()) + copy(data[18:34], d.PeerAddr.To16()) + offset = 34 + } else { + copy(data[1:4], d.TransactionID) + offset = 4 + } + + if len(d.Options) > 0 { + for _, o := range d.Options { + if err := o.encode(data[offset:], opts); err != nil { + return err + } + offset += int(o.Length) + 4 // 2 from option code, 2 from option length + } + } + return nil +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (d *DHCPv6) CanDecode() gopacket.LayerClass { + return LayerTypeDHCPv6 +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (d *DHCPv6) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypePayload +} + +func decodeDHCPv6(data []byte, p gopacket.PacketBuilder) error { + dhcp := &DHCPv6{} + err := dhcp.DecodeFromBytes(data, p) + if err != nil { + return err + } + p.AddLayer(dhcp) + return p.NextDecoder(gopacket.LayerTypePayload) +} + +// DHCPv6StatusCode represents a DHCP status code - RFC-3315 +type DHCPv6StatusCode uint16 + +// Constants for the DHCPv6StatusCode. +const ( + DHCPv6StatusCodeSuccess DHCPv6StatusCode = iota + DHCPv6StatusCodeUnspecFail + DHCPv6StatusCodeNoAddrsAvail + DHCPv6StatusCodeNoBinding + DHCPv6StatusCodeNotOnLink + DHCPv6StatusCodeUseMulticast +) + +// String returns a string version of a DHCPv6StatusCode. 
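A usage sketch for the DHCPv6 decoder above. For RelayForward/RelayReply messages the layer carries a hop count plus link and peer addresses instead of a transaction ID, exactly as DecodeFromBytes distinguishes. Illustrative only, with udpPayload assumed to be the raw message (UDP ports 546/547):

    d := &layers.DHCPv6{}
    if err := d.DecodeFromBytes(udpPayload, gopacket.NilDecodeFeedback); err != nil {
        return err
    }
    if d.MsgType == layers.DHCPv6MsgTypeRelayForward || d.MsgType == layers.DHCPv6MsgTypeRelayReply {
        fmt.Println("relayed:", d.HopCount, d.LinkAddr, d.PeerAddr)
    } else {
        fmt.Printf("%s, xid %x\n", d.MsgType, d.TransactionID)
    }
    fmt.Println("options:", d.Options) // DHCPv6Options.String pretty-prints DUIDs, ORO lists, etc.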
+func (o DHCPv6StatusCode) String() string { + switch o { + case DHCPv6StatusCodeSuccess: + return "Success" + case DHCPv6StatusCodeUnspecFail: + return "UnspecifiedFailure" + case DHCPv6StatusCodeNoAddrsAvail: + return "NoAddressAvailable" + case DHCPv6StatusCodeNoBinding: + return "NoBinding" + case DHCPv6StatusCodeNotOnLink: + return "NotOnLink" + case DHCPv6StatusCodeUseMulticast: + return "UseMulticast" + default: + return "Unknown" + } +} + +// DHCPv6DUIDType represents a DHCP DUID - RFC-3315 +type DHCPv6DUIDType uint16 + +// Constants for the DHCPv6DUIDType. +const ( + DHCPv6DUIDTypeLLT DHCPv6DUIDType = iota + 1 + DHCPv6DUIDTypeEN + DHCPv6DUIDTypeLL +) + +// String returns a string version of a DHCPv6DUIDType. +func (o DHCPv6DUIDType) String() string { + switch o { + case DHCPv6DUIDTypeLLT: + return "LLT" + case DHCPv6DUIDTypeEN: + return "EN" + case DHCPv6DUIDTypeLL: + return "LL" + default: + return "Unknown" + } +} + +// DHCPv6DUID means DHCP Unique Identifier as stated in RFC 3315, section 9 (https://tools.ietf.org/html/rfc3315#page-19) +type DHCPv6DUID struct { + Type DHCPv6DUIDType + // LLT, LL + HardwareType []byte + // EN + EnterpriseNumber []byte + // LLT + Time []byte + // LLT, LL + LinkLayerAddress net.HardwareAddr + // EN + Identifier []byte +} + +// DecodeFromBytes decodes the given bytes into a DHCPv6DUID +func (d *DHCPv6DUID) DecodeFromBytes(data []byte) error { + if len(data) < 2 { + return fmt.Errorf("Not enough bytes to decode: %d", len(data)) + } + + d.Type = DHCPv6DUIDType(binary.BigEndian.Uint16(data[:2])) + if d.Type == DHCPv6DUIDTypeLLT || d.Type == DHCPv6DUIDTypeLL { + if len(data) < 4 { + return fmt.Errorf("Not enough bytes to decode: %d", len(data)) + } + d.HardwareType = data[2:4] + } + + if d.Type == DHCPv6DUIDTypeLLT { + if len(data) < 8 { + return fmt.Errorf("Not enough bytes to decode: %d", len(data)) + } + d.Time = data[4:8] + d.LinkLayerAddress = net.HardwareAddr(data[8:]) + } else if d.Type == DHCPv6DUIDTypeEN { + if len(data) < 6 { + return fmt.Errorf("Not enough bytes to decode: %d", len(data)) + } + d.EnterpriseNumber = data[2:6] + d.Identifier = data[6:] + } else { // DHCPv6DUIDTypeLL + if len(data) < 4 { + return fmt.Errorf("Not enough bytes to decode: %d", len(data)) + } + d.LinkLayerAddress = net.HardwareAddr(data[4:]) + } + + return nil +} + +// Encode encodes the DHCPv6DUID in a slice of bytes +func (d *DHCPv6DUID) Encode() []byte { + length := d.Len() + data := make([]byte, length) + binary.BigEndian.PutUint16(data[0:2], uint16(d.Type)) + + if d.Type == DHCPv6DUIDTypeLLT || d.Type == DHCPv6DUIDTypeLL { + copy(data[2:4], d.HardwareType) + } + + if d.Type == DHCPv6DUIDTypeLLT { + copy(data[4:8], d.Time) + copy(data[8:], d.LinkLayerAddress) + } else if d.Type == DHCPv6DUIDTypeEN { + copy(data[2:6], d.EnterpriseNumber) + copy(data[6:], d.Identifier) + } else { + copy(data[4:], d.LinkLayerAddress) + } + + return data +} + +// Len returns the length of the DHCPv6DUID, respecting the type +func (d *DHCPv6DUID) Len() int { + length := 2 // d.Type + if d.Type == DHCPv6DUIDTypeLLT { + length += 2 /*HardwareType*/ + 4 /*d.Time*/ + len(d.LinkLayerAddress) + } else if d.Type == DHCPv6DUIDTypeEN { + length += 4 /*d.EnterpriseNumber*/ + len(d.Identifier) + } else { // LL + length += 2 /*d.HardwareType*/ + len(d.LinkLayerAddress) + } + + return length +} + +func (d *DHCPv6DUID) String() string { + duid := "Type: " + d.Type.String() + ", " + if d.Type == DHCPv6DUIDTypeLLT { + duid += fmt.Sprintf("HardwareType: %v, Time: %v, LinkLayerAddress: %v", 
d.HardwareType, d.Time, d.LinkLayerAddress) + } else if d.Type == DHCPv6DUIDTypeEN { + duid += fmt.Sprintf("EnterpriseNumber: %v, Identifier: %v", d.EnterpriseNumber, d.Identifier) + } else { // DHCPv6DUIDTypeLL + duid += fmt.Sprintf("HardwareType: %v, LinkLayerAddress: %v", d.HardwareType, d.LinkLayerAddress) + } + return duid +} + +func decodeDHCPv6DUID(data []byte) (*DHCPv6DUID, error) { + duid := &DHCPv6DUID{} + err := duid.DecodeFromBytes(data) + if err != nil { + return nil, err + } + return duid, nil +} diff --git a/vendor/github.com/google/gopacket/layers/dhcpv6_options.go b/vendor/github.com/google/gopacket/layers/dhcpv6_options.go new file mode 100644 index 0000000000..5a1f9919b1 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/dhcpv6_options.go @@ -0,0 +1,621 @@ +// Copyright 2018 The GoPacket Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "github.com/google/gopacket" +) + +// DHCPv6Opt represents a DHCP option or parameter from RFC-3315 +type DHCPv6Opt uint16 + +// Constants for the DHCPv6Opt options. +const ( + DHCPv6OptClientID DHCPv6Opt = 1 + DHCPv6OptServerID DHCPv6Opt = 2 + DHCPv6OptIANA DHCPv6Opt = 3 + DHCPv6OptIATA DHCPv6Opt = 4 + DHCPv6OptIAAddr DHCPv6Opt = 5 + DHCPv6OptOro DHCPv6Opt = 6 + DHCPv6OptPreference DHCPv6Opt = 7 + DHCPv6OptElapsedTime DHCPv6Opt = 8 + DHCPv6OptRelayMessage DHCPv6Opt = 9 + DHCPv6OptAuth DHCPv6Opt = 11 + DHCPv6OptUnicast DHCPv6Opt = 12 + DHCPv6OptStatusCode DHCPv6Opt = 13 + DHCPv6OptRapidCommit DHCPv6Opt = 14 + DHCPv6OptUserClass DHCPv6Opt = 15 + DHCPv6OptVendorClass DHCPv6Opt = 16 + DHCPv6OptVendorOpts DHCPv6Opt = 17 + DHCPv6OptInterfaceID DHCPv6Opt = 18 + DHCPv6OptReconfigureMessage DHCPv6Opt = 19 + DHCPv6OptReconfigureAccept DHCPv6Opt = 20 + + // RFC 3319 Session Initiation Protocol (SIP) + DHCPv6OptSIPServersDomainList DHCPv6Opt = 21 + DHCPv6OptSIPServersAddressList DHCPv6Opt = 22 + + // RFC 3646 DNS Configuration + DHCPv6OptDNSServers DHCPv6Opt = 23 + DHCPv6OptDomainList DHCPv6Opt = 24 + + // RFC 3633 Prefix Delegation + DHCPv6OptIAPD DHCPv6Opt = 25 + DHCPv6OptIAPrefix DHCPv6Opt = 26 + + // RFC 3898 Network Information Service (NIS) + DHCPv6OptNISServers DHCPv6Opt = 27 + DHCPv6OptNISPServers DHCPv6Opt = 28 + DHCPv6OptNISDomainName DHCPv6Opt = 29 + DHCPv6OptNISPDomainName DHCPv6Opt = 30 + + // RFC 4075 Simple Network Time Protocol (SNTP) + DHCPv6OptSNTPServers DHCPv6Opt = 31 + + // RFC 4242 Information Refresh Time Option + DHCPv6OptInformationRefreshTime DHCPv6Opt = 32 + + // RFC 4280 Broadcast and Multicast Control Servers + DHCPv6OptBCMCSServerDomainNameList DHCPv6Opt = 33 + DHCPv6OptBCMCSServerAddressList DHCPv6Opt = 34 + + // RFC 4776 Civic Address ConfigurationOption + DHCPv6OptGeoconfCivic DHCPv6Opt = 36 + + // RFC 4649 Relay Agent Remote-ID + DHCPv6OptRemoteID DHCPv6Opt = 37 + + // RFC 4580 Relay Agent Subscriber-ID + DHCPv6OptSubscriberID DHCPv6Opt = 38 + + // RFC 4704 Client Full Qualified Domain Name (FQDN) + DHCPv6OptClientFQDN DHCPv6Opt = 39 + + // RFC 5192 Protocol for Carrying Authentication for Network Access (PANA) + DHCPv6OptPanaAgent DHCPv6Opt = 40 + + // RFC 4833 Timezone Options + DHCPv6OptNewPOSIXTimezone DHCPv6Opt = 41 + DHCPv6OptNewTZDBTimezone DHCPv6Opt = 42 + + // RFC 4994 Relay Agent Echo Request + DHCPv6OptEchoRequestOption DHCPv6Opt = 43 + + // RFC 5007 Leasequery + DHCPv6OptLQQuery DHCPv6Opt = 
44 + DHCPv6OptCLTTime DHCPv6Opt = 45 + DHCPv6OptClientData DHCPv6Opt = 46 + DHCPv6OptLQRelayData DHCPv6Opt = 47 + DHCPv6OptLQClientLink DHCPv6Opt = 48 + + // RFC 6610 Home Information Discovery in Mobile IPv6 (MIPv6) + DHCPv6OptMIP6HNIDF DHCPv6Opt = 49 + DHCPv6OptMIP6VDINF DHCPv6Opt = 50 + DHCPv6OptMIP6IDINF DHCPv6Opt = 69 + DHCPv6OptMIP6UDINF DHCPv6Opt = 70 + DHCPv6OptMIP6HNP DHCPv6Opt = 71 + DHCPv6OptMIP6HAA DHCPv6Opt = 72 + DHCPv6OptMIP6HAF DHCPv6Opt = 73 + + // RFC 5223 Discovering Location-to-Service Translation (LoST) Servers + DHCPv6OptV6LOST DHCPv6Opt = 51 + + // RFC 5417 Control And Provisioning of Wireless Access Points (CAPWAP) + DHCPv6OptCAPWAPACV6 DHCPv6Opt = 52 + + // RFC 5460 Bulk Leasequery + DHCPv6OptRelayID DHCPv6Opt = 53 + + // RFC 5678 IEEE 802.21 Mobility Services (MoS) Discovery + DHCPv6OptIPv6AddressMoS DHCPv6Opt = 54 + DHCPv6OptIPv6FQDNMoS DHCPv6Opt = 55 + + // RFC 5908 NTP Server Option + DHCPv6OptNTPServer DHCPv6Opt = 56 + + // RFC 5986 Discovering the Local Location Information Server (LIS) + DHCPv6OptV6AccessDomain DHCPv6Opt = 57 + + // RFC 5986 SIP User Agent + DHCPv6OptSIPUACSList DHCPv6Opt = 58 + + // RFC 5970 Options for Network Boot + DHCPv6OptBootFileURL DHCPv6Opt = 59 + DHCPv6OptBootFileParam DHCPv6Opt = 60 + DHCPv6OptClientArchType DHCPv6Opt = 61 + DHCPv6OptNII DHCPv6Opt = 62 + + // RFC 6225 Coordinate-Based Location Configuration Information + DHCPv6OptGeolocation DHCPv6Opt = 63 + + // RFC 6334 Dual-Stack Lite + DHCPv6OptAFTRName DHCPv6Opt = 64 + + // RFC 6440 EAP Re-authentication Protocol (ERP) + DHCPv6OptERPLocalDomainName DHCPv6Opt = 65 + + // RFC 6422 Relay-Supplied DHCP Options + DHCPv6OptRSOO DHCPv6Opt = 66 + + // RFC 6603 Prefix Exclude Option for DHCPv6-based Prefix Delegation + DHCPv6OptPDExclude DHCPv6Opt = 67 + + // RFC 6607 Virtual Subnet Selection + DHCPv6OptVSS DHCPv6Opt = 68 + + // RFC 6731 Improved Recursive DNS Server Selection for Multi-Interfaced Nodes + DHCPv6OptRDNSSSelection DHCPv6Opt = 74 + + // RFC 6784 Kerberos Options for DHCPv6 + DHCPv6OptKRBPrincipalName DHCPv6Opt = 75 + DHCPv6OptKRBRealmName DHCPv6Opt = 76 + DHCPv6OptKRBKDC DHCPv6Opt = 77 + + // RFC 6939 Client Link-Layer Address Option + DHCPv6OptClientLinkLayerAddress DHCPv6Opt = 79 + + // RFC 6977 Triggering DHCPv6 Reconfiguration from Relay Agents + DHCPv6OptLinkAddress DHCPv6Opt = 80 + + // RFC 7037 RADIUS Option for the DHCPv6 Relay Agent + DHCPv6OptRADIUS DHCPv6Opt = 81 + + // RFC 7083 Modification to Default Values of SOL_MAX_RT and INF_MAX_RT + DHCPv6OptSolMaxRt DHCPv6Opt = 82 + DHCPv6OptInfMaxRt DHCPv6Opt = 83 + + // RFC 7078 Distributing Address Selection Policy + DHCPv6OptAddrSel DHCPv6Opt = 84 + DHCPv6OptAddrSelTable DHCPv6Opt = 85 + + // RFC 7291 DHCP Options for the Port Control Protocol (PCP) + DHCPv6OptV6PCPServer DHCPv6Opt = 86 + + // RFC 7341 DHCPv4-over-DHCPv6 (DHCP 4o6) Transport + DHCPv6OptDHCPv4Message DHCPv6Opt = 87 + DHCPv6OptDHCPv4OverDHCPv6Server DHCPv6Opt = 88 + + // RFC 7598 Configuration of Softwire Address and Port-Mapped Clients + DHCPv6OptS46Rule DHCPv6Opt = 89 + DHCPv6OptS46BR DHCPv6Opt = 90 + DHCPv6OptS46DMR DHCPv6Opt = 91 + DHCPv6OptS46V4V4Bind DHCPv6Opt = 92 + DHCPv6OptS46PortParameters DHCPv6Opt = 93 + DHCPv6OptS46ContMAPE DHCPv6Opt = 94 + DHCPv6OptS46ContMAPT DHCPv6Opt = 95 + DHCPv6OptS46ContLW DHCPv6Opt = 96 + + // RFC 7600 IPv4 Residual Deployment via IPv6 + DHCPv6Opt4RD DHCPv6Opt = 97 + DHCPv6Opt4RDMapRule DHCPv6Opt = 98 + DHCPv6Opt4RDNonMapRule DHCPv6Opt = 99 + + // RFC 7653 Active Leasequery + DHCPv6OptLQBaseTime DHCPv6Opt = 100 + 
DHCPv6OptLQStartTime DHCPv6Opt = 101 + DHCPv6OptLQEndTime DHCPv6Opt = 102 + + // RFC 7710 Captive-Portal Identification + DHCPv6OptCaptivePortal DHCPv6Opt = 103 + + // RFC 7774 Multicast Protocol for Low-Power and Lossy Networks (MPL) Parameter Configuration + DHCPv6OptMPLParameters DHCPv6Opt = 104 + + // RFC 7839 Access-Network-Identifier (ANI) + DHCPv6OptANIATT DHCPv6Opt = 105 + DHCPv6OptANINetworkName DHCPv6Opt = 106 + DHCPv6OptANIAPName DHCPv6Opt = 107 + DHCPv6OptANIAPBSSID DHCPv6Opt = 108 + DHCPv6OptANIOperatorID DHCPv6Opt = 109 + DHCPv6OptANIOperatorRealm DHCPv6Opt = 110 + + // RFC 8026 Unified IPv4-in-IPv6 Softwire Customer Premises Equipment (CPE) + DHCPv6OptS46Priority DHCPv6Opt = 111 + + // draft-ietf-opsawg-mud-25 Manufacturer Usage Description (MUD) + DHCPv6OptMUDURLV6 DHCPv6Opt = 112 + + // RFC 8115 IPv4-Embedded Multicast and Unicast IPv6 Prefixes + DHCPv6OptV6Prefix64 DHCPv6Opt = 113 + + // RFC 8156 DHCPv6 Failover Protocol + DHCPv6OptFBindingStatus DHCPv6Opt = 114 + DHCPv6OptFConnectFlags DHCPv6Opt = 115 + DHCPv6OptFDNSRemovalInfo DHCPv6Opt = 116 + DHCPv6OptFDNSHostName DHCPv6Opt = 117 + DHCPv6OptFDNSZoneName DHCPv6Opt = 118 + DHCPv6OptFDNSFlags DHCPv6Opt = 119 + DHCPv6OptFExpirationTime DHCPv6Opt = 120 + DHCPv6OptFMaxUnacknowledgedBNDUPD DHCPv6Opt = 121 + DHCPv6OptFMCLT DHCPv6Opt = 122 + DHCPv6OptFPartnerLifetime DHCPv6Opt = 123 + DHCPv6OptFPartnerLifetimeSent DHCPv6Opt = 124 + DHCPv6OptFPartnerDownTime DHCPv6Opt = 125 + DHCPv6OptFPartnerRawCltTime DHCPv6Opt = 126 + DHCPv6OptFProtocolVersion DHCPv6Opt = 127 + DHCPv6OptFKeepaliveTime DHCPv6Opt = 128 + DHCPv6OptFReconfigureData DHCPv6Opt = 129 + DHCPv6OptFRelationshipName DHCPv6Opt = 130 + DHCPv6OptFServerFlags DHCPv6Opt = 131 + DHCPv6OptFServerState DHCPv6Opt = 132 + DHCPv6OptFStartTimeOfState DHCPv6Opt = 133 + DHCPv6OptFStateExpirationTime DHCPv6Opt = 134 + + // RFC 8357 Generalized UDP Source Port for DHCP Relay + DHCPv6OptRelayPort DHCPv6Opt = 135 + + // draft-ietf-netconf-zerotouch-25 Zero Touch Provisioning for Networking Devices + DHCPv6OptV6ZeroTouchRedirect DHCPv6Opt = 136 + + // RFC 6153 Access Network Discovery and Selection Function (ANDSF) Discovery + DHCPv6OptIPV6AddressANDSF DHCPv6Opt = 143 +) + +// String returns a string version of a DHCPv6Opt. 
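Option codes from this table are carried on the wire as 16-bit big-endian values; an Option Request option (DHCPv6OptOro), for instance, is just a concatenation of the requested codes. A small sketch using the NewDHCPv6Option constructor and the ORO pretty-printer added later in this file (illustrative only; assumes encoding/binary is imported):

    // Ask the server for recursive DNS servers (23) and the domain search list (24).
    oro := make([]byte, 4)
    binary.BigEndian.PutUint16(oro[0:2], uint16(layers.DHCPv6OptDNSServers))
    binary.BigEndian.PutUint16(oro[2:4], uint16(layers.DHCPv6OptDomainList))
    opt := layers.NewDHCPv6Option(layers.DHCPv6OptOro, oro)
    fmt.Println(opt) // Option(Oro:[DNSRecursiveNameServer,DomainSearchList])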
+func (o DHCPv6Opt) String() string { + switch o { + case DHCPv6OptClientID: + return "ClientID" + case DHCPv6OptServerID: + return "ServerID" + case DHCPv6OptIANA: + return "IA_NA" + case DHCPv6OptIATA: + return "IA_TA" + case DHCPv6OptIAAddr: + return "IAAddr" + case DHCPv6OptOro: + return "Oro" + case DHCPv6OptPreference: + return "Preference" + case DHCPv6OptElapsedTime: + return "ElapsedTime" + case DHCPv6OptRelayMessage: + return "RelayMessage" + case DHCPv6OptAuth: + return "Auth" + case DHCPv6OptUnicast: + return "Unicast" + case DHCPv6OptStatusCode: + return "StatusCode" + case DHCPv6OptRapidCommit: + return "RapidCommit" + case DHCPv6OptUserClass: + return "UserClass" + case DHCPv6OptVendorClass: + return "VendorClass" + case DHCPv6OptVendorOpts: + return "VendorOpts" + case DHCPv6OptInterfaceID: + return "InterfaceID" + case DHCPv6OptReconfigureMessage: + return "ReconfigureMessage" + case DHCPv6OptReconfigureAccept: + return "ReconfigureAccept" + case DHCPv6OptSIPServersDomainList: + return "SIPServersDomainList" + case DHCPv6OptSIPServersAddressList: + return "SIPServersAddressList" + case DHCPv6OptDNSServers: + return "DNSRecursiveNameServer" + case DHCPv6OptDomainList: + return "DomainSearchList" + case DHCPv6OptIAPD: + return "IdentityAssociationPrefixDelegation" + case DHCPv6OptIAPrefix: + return "IAPDPrefix" + case DHCPv6OptNISServers: + return "NISServers" + case DHCPv6OptNISPServers: + return "NISv2Servers" + case DHCPv6OptNISDomainName: + return "NISDomainName" + case DHCPv6OptNISPDomainName: + return "NISv2DomainName" + case DHCPv6OptSNTPServers: + return "SNTPServers" + case DHCPv6OptInformationRefreshTime: + return "InformationRefreshTime" + case DHCPv6OptBCMCSServerDomainNameList: + return "BCMCSControlServersDomainNameList" + case DHCPv6OptBCMCSServerAddressList: + return "BCMCSControlServersAddressList" + case DHCPv6OptGeoconfCivic: + return "CivicAddress" + case DHCPv6OptRemoteID: + return "RelayAgentRemoteID" + case DHCPv6OptSubscriberID: + return "RelayAgentSubscriberID" + case DHCPv6OptClientFQDN: + return "ClientFQDN" + case DHCPv6OptPanaAgent: + return "PANAAuthenticationAgent" + case DHCPv6OptNewPOSIXTimezone: + return "NewPOSIXTimezone" + case DHCPv6OptNewTZDBTimezone: + return "NewTZDBTimezone" + case DHCPv6OptEchoRequestOption: + return "EchoRequest" + case DHCPv6OptLQQuery: + return "LeasequeryQuery" + case DHCPv6OptClientData: + return "LeasequeryClientData" + case DHCPv6OptCLTTime: + return "LeasequeryClientLastTransactionTime" + case DHCPv6OptLQRelayData: + return "LeasequeryRelayData" + case DHCPv6OptLQClientLink: + return "LeasequeryClientLink" + case DHCPv6OptMIP6HNIDF: + return "MIPv6HomeNetworkIDFQDN" + case DHCPv6OptMIP6VDINF: + return "MIPv6VisitedHomeNetworkInformation" + case DHCPv6OptMIP6IDINF: + return "MIPv6IdentifiedHomeNetworkInformation" + case DHCPv6OptMIP6UDINF: + return "MIPv6UnrestrictedHomeNetworkInformation" + case DHCPv6OptMIP6HNP: + return "MIPv6HomeNetworkPrefix" + case DHCPv6OptMIP6HAA: + return "MIPv6HomeAgentAddress" + case DHCPv6OptMIP6HAF: + return "MIPv6HomeAgentFQDN" + case DHCPv6OptV6LOST: + return "LoST Server" + case DHCPv6OptCAPWAPACV6: + return "CAPWAPAccessControllerV6" + case DHCPv6OptRelayID: + return "LeasequeryRelayID" + case DHCPv6OptIPv6AddressMoS: + return "MoSIPv6Address" + case DHCPv6OptIPv6FQDNMoS: + return "MoSDomainNameList" + case DHCPv6OptNTPServer: + return "NTPServer" + case DHCPv6OptV6AccessDomain: + return "AccessNetworkDomainName" + case DHCPv6OptSIPUACSList: + return 
"SIPUserAgentConfigurationServiceDomains" + case DHCPv6OptBootFileURL: + return "BootFileURL" + case DHCPv6OptBootFileParam: + return "BootFileParameters" + case DHCPv6OptClientArchType: + return "ClientSystemArchitectureType" + case DHCPv6OptNII: + return "ClientNetworkInterfaceIdentifier" + case DHCPv6OptGeolocation: + return "Geolocation" + case DHCPv6OptAFTRName: + return "AFTRName" + case DHCPv6OptERPLocalDomainName: + return "AFTRName" + case DHCPv6OptRSOO: + return "RSOOption" + case DHCPv6OptPDExclude: + return "PrefixExclude" + case DHCPv6OptVSS: + return "VirtualSubnetSelection" + case DHCPv6OptRDNSSSelection: + return "RDNSSSelection" + case DHCPv6OptKRBPrincipalName: + return "KerberosPrincipalName" + case DHCPv6OptKRBRealmName: + return "KerberosRealmName" + case DHCPv6OptKRBKDC: + return "KerberosKDC" + case DHCPv6OptClientLinkLayerAddress: + return "ClientLinkLayerAddress" + case DHCPv6OptLinkAddress: + return "LinkAddress" + case DHCPv6OptRADIUS: + return "RADIUS" + case DHCPv6OptSolMaxRt: + return "SolMaxRt" + case DHCPv6OptInfMaxRt: + return "InfMaxRt" + case DHCPv6OptAddrSel: + return "AddressSelection" + case DHCPv6OptAddrSelTable: + return "AddressSelectionTable" + case DHCPv6OptV6PCPServer: + return "PCPServer" + case DHCPv6OptDHCPv4Message: + return "DHCPv4Message" + case DHCPv6OptDHCPv4OverDHCPv6Server: + return "DHCP4o6ServerAddress" + case DHCPv6OptS46Rule: + return "S46Rule" + case DHCPv6OptS46BR: + return "S46BR" + case DHCPv6OptS46DMR: + return "S46DMR" + case DHCPv6OptS46V4V4Bind: + return "S46IPv4IPv6AddressBinding" + case DHCPv6OptS46PortParameters: + return "S46PortParameters" + case DHCPv6OptS46ContMAPE: + return "S46MAPEContainer" + case DHCPv6OptS46ContMAPT: + return "S46MAPTContainer" + case DHCPv6OptS46ContLW: + return "S46Lightweight4Over6Container" + case DHCPv6Opt4RD: + return "4RD" + case DHCPv6Opt4RDMapRule: + return "4RDMapRule" + case DHCPv6Opt4RDNonMapRule: + return "4RDNonMapRule" + case DHCPv6OptLQBaseTime: + return "LQBaseTime" + case DHCPv6OptLQStartTime: + return "LQStartTime" + case DHCPv6OptLQEndTime: + return "LQEndTime" + case DHCPv6OptCaptivePortal: + return "CaptivePortal" + case DHCPv6OptMPLParameters: + return "MPLParameterConfiguration" + case DHCPv6OptANIATT: + return "ANIAccessTechnologyType" + case DHCPv6OptANINetworkName: + return "ANINetworkName" + case DHCPv6OptANIAPName: + return "ANIAccessPointName" + case DHCPv6OptANIAPBSSID: + return "ANIAccessPointBSSID" + case DHCPv6OptANIOperatorID: + return "ANIOperatorIdentifier" + case DHCPv6OptANIOperatorRealm: + return "ANIOperatorRealm" + case DHCPv6OptS46Priority: + return "S64Priority" + case DHCPv6OptMUDURLV6: + return "ManufacturerUsageDescriptionURL" + case DHCPv6OptV6Prefix64: + return "V6Prefix64" + case DHCPv6OptFBindingStatus: + return "FailoverBindingStatus" + case DHCPv6OptFConnectFlags: + return "FailoverConnectFlags" + case DHCPv6OptFDNSRemovalInfo: + return "FailoverDNSRemovalInfo" + case DHCPv6OptFDNSHostName: + return "FailoverDNSHostName" + case DHCPv6OptFDNSZoneName: + return "FailoverDNSZoneName" + case DHCPv6OptFDNSFlags: + return "FailoverDNSFlags" + case DHCPv6OptFExpirationTime: + return "FailoverExpirationTime" + case DHCPv6OptFMaxUnacknowledgedBNDUPD: + return "FailoverMaxUnacknowledgedBNDUPDMessages" + case DHCPv6OptFMCLT: + return "FailoverMaximumClientLeadTime" + case DHCPv6OptFPartnerLifetime: + return "FailoverPartnerLifetime" + case DHCPv6OptFPartnerLifetimeSent: + return "FailoverPartnerLifetimeSent" + case DHCPv6OptFPartnerDownTime: + return 
"FailoverPartnerDownTime" + case DHCPv6OptFPartnerRawCltTime: + return "FailoverPartnerRawClientLeadTime" + case DHCPv6OptFProtocolVersion: + return "FailoverProtocolVersion" + case DHCPv6OptFKeepaliveTime: + return "FailoverKeepaliveTime" + case DHCPv6OptFReconfigureData: + return "FailoverReconfigureData" + case DHCPv6OptFRelationshipName: + return "FailoverRelationshipName" + case DHCPv6OptFServerFlags: + return "FailoverServerFlags" + case DHCPv6OptFServerState: + return "FailoverServerState" + case DHCPv6OptFStartTimeOfState: + return "FailoverStartTimeOfState" + case DHCPv6OptFStateExpirationTime: + return "FailoverStateExpirationTime" + case DHCPv6OptRelayPort: + return "RelayPort" + case DHCPv6OptV6ZeroTouchRedirect: + return "ZeroTouch" + case DHCPv6OptIPV6AddressANDSF: + return "ANDSFIPv6Address" + default: + return fmt.Sprintf("Unknown(%d)", uint16(o)) + } +} + +// DHCPv6Options is used to get nicely printed option lists which would normally +// be cut off after 5 options. +type DHCPv6Options []DHCPv6Option + +// String returns a string version of the options list. +func (o DHCPv6Options) String() string { + buf := &bytes.Buffer{} + buf.WriteByte('[') + for i, opt := range o { + buf.WriteString(opt.String()) + if i+1 != len(o) { + buf.WriteString(", ") + } + } + buf.WriteByte(']') + return buf.String() +} + +// DHCPv6Option rerpresents a DHCP option. +type DHCPv6Option struct { + Code DHCPv6Opt + Length uint16 + Data []byte +} + +// String returns a string version of a DHCP Option. +func (o DHCPv6Option) String() string { + switch o.Code { + case DHCPv6OptClientID, DHCPv6OptServerID: + duid, err := decodeDHCPv6DUID(o.Data) + if err != nil { + return fmt.Sprintf("Option(%s:INVALID)", o.Code) + } + return fmt.Sprintf("Option(%s:[%s])", o.Code, duid.String()) + case DHCPv6OptOro: + options := "" + for i := 0; i < int(o.Length); i += 2 { + if options != "" { + options += "," + } + option := DHCPv6Opt(binary.BigEndian.Uint16(o.Data[i : i+2])) + options += option.String() + } + return fmt.Sprintf("Option(%s:[%s])", o.Code, options) + default: + return fmt.Sprintf("Option(%s:%v)", o.Code, o.Data) + } +} + +// NewDHCPv6Option constructs a new DHCPv6Option with a given type and data. +func NewDHCPv6Option(code DHCPv6Opt, data []byte) DHCPv6Option { + o := DHCPv6Option{Code: code} + if data != nil { + o.Data = data + o.Length = uint16(len(data)) + } + + return o +} + +func (o *DHCPv6Option) encode(b []byte, opts gopacket.SerializeOptions) error { + binary.BigEndian.PutUint16(b[0:2], uint16(o.Code)) + if opts.FixLengths { + binary.BigEndian.PutUint16(b[2:4], uint16(len(o.Data))) + } else { + binary.BigEndian.PutUint16(b[2:4], o.Length) + } + copy(b[4:], o.Data) + + return nil +} + +func (o *DHCPv6Option) decode(data []byte) error { + if len(data) < 4 { + return errors.New("not enough data to decode") + } + o.Code = DHCPv6Opt(binary.BigEndian.Uint16(data[0:2])) + o.Length = binary.BigEndian.Uint16(data[2:4]) + if len(data) < 4+int(o.Length) { + return fmt.Errorf("dhcpv6 option size < length %d", 4+o.Length) + } + o.Data = data[4 : 4+o.Length] + return nil +} diff --git a/vendor/github.com/google/gopacket/layers/dns.go b/vendor/github.com/google/gopacket/layers/dns.go new file mode 100644 index 0000000000..de55294b5b --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/dns.go @@ -0,0 +1,1098 @@ +// Copyright 2014, 2018 GoPacket Authors. All rights reserved. 
+// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "errors" + "fmt" + "net" + "strings" + + "github.com/google/gopacket" +) + +// DNSClass defines the class associated with a request/response. Different DNS +// classes can be thought of as an array of parallel namespace trees. +type DNSClass uint16 + +// DNSClass known values. +const ( + DNSClassIN DNSClass = 1 // Internet + DNSClassCS DNSClass = 2 // the CSNET class (Obsolete) + DNSClassCH DNSClass = 3 // the CHAOS class + DNSClassHS DNSClass = 4 // Hesiod [Dyer 87] + DNSClassAny DNSClass = 255 // AnyClass +) + +func (dc DNSClass) String() string { + switch dc { + default: + return "Unknown" + case DNSClassIN: + return "IN" + case DNSClassCS: + return "CS" + case DNSClassCH: + return "CH" + case DNSClassHS: + return "HS" + case DNSClassAny: + return "Any" + } +} + +// DNSType defines the type of data being requested/returned in a +// question/answer. +type DNSType uint16 + +// DNSType known values. +const ( + DNSTypeA DNSType = 1 // a host address + DNSTypeNS DNSType = 2 // an authoritative name server + DNSTypeMD DNSType = 3 // a mail destination (Obsolete - use MX) + DNSTypeMF DNSType = 4 // a mail forwarder (Obsolete - use MX) + DNSTypeCNAME DNSType = 5 // the canonical name for an alias + DNSTypeSOA DNSType = 6 // marks the start of a zone of authority + DNSTypeMB DNSType = 7 // a mailbox domain name (EXPERIMENTAL) + DNSTypeMG DNSType = 8 // a mail group member (EXPERIMENTAL) + DNSTypeMR DNSType = 9 // a mail rename domain name (EXPERIMENTAL) + DNSTypeNULL DNSType = 10 // a null RR (EXPERIMENTAL) + DNSTypeWKS DNSType = 11 // a well known service description + DNSTypePTR DNSType = 12 // a domain name pointer + DNSTypeHINFO DNSType = 13 // host information + DNSTypeMINFO DNSType = 14 // mailbox or mail list information + DNSTypeMX DNSType = 15 // mail exchange + DNSTypeTXT DNSType = 16 // text strings + DNSTypeAAAA DNSType = 28 // a IPv6 host address [RFC3596] + DNSTypeSRV DNSType = 33 // server discovery [RFC2782] [RFC6195] + DNSTypeOPT DNSType = 41 // OPT Pseudo-RR [RFC6891] + DNSTypeURI DNSType = 256 // URI RR [RFC7553] +) + +func (dt DNSType) String() string { + switch dt { + default: + return "Unknown" + case DNSTypeA: + return "A" + case DNSTypeNS: + return "NS" + case DNSTypeMD: + return "MD" + case DNSTypeMF: + return "MF" + case DNSTypeCNAME: + return "CNAME" + case DNSTypeSOA: + return "SOA" + case DNSTypeMB: + return "MB" + case DNSTypeMG: + return "MG" + case DNSTypeMR: + return "MR" + case DNSTypeNULL: + return "NULL" + case DNSTypeWKS: + return "WKS" + case DNSTypePTR: + return "PTR" + case DNSTypeHINFO: + return "HINFO" + case DNSTypeMINFO: + return "MINFO" + case DNSTypeMX: + return "MX" + case DNSTypeTXT: + return "TXT" + case DNSTypeAAAA: + return "AAAA" + case DNSTypeSRV: + return "SRV" + case DNSTypeOPT: + return "OPT" + case DNSTypeURI: + return "URI" + } +} + +// DNSResponseCode provides response codes for question answers. +type DNSResponseCode uint8 + +// DNSResponseCode known values. 
+const ( + DNSResponseCodeNoErr DNSResponseCode = 0 // No error + DNSResponseCodeFormErr DNSResponseCode = 1 // Format Error [RFC1035] + DNSResponseCodeServFail DNSResponseCode = 2 // Server Failure [RFC1035] + DNSResponseCodeNXDomain DNSResponseCode = 3 // Non-Existent Domain [RFC1035] + DNSResponseCodeNotImp DNSResponseCode = 4 // Not Implemented [RFC1035] + DNSResponseCodeRefused DNSResponseCode = 5 // Query Refused [RFC1035] + DNSResponseCodeYXDomain DNSResponseCode = 6 // Name Exists when it should not [RFC2136] + DNSResponseCodeYXRRSet DNSResponseCode = 7 // RR Set Exists when it should not [RFC2136] + DNSResponseCodeNXRRSet DNSResponseCode = 8 // RR Set that should exist does not [RFC2136] + DNSResponseCodeNotAuth DNSResponseCode = 9 // Server Not Authoritative for zone [RFC2136] + DNSResponseCodeNotZone DNSResponseCode = 10 // Name not contained in zone [RFC2136] + DNSResponseCodeBadVers DNSResponseCode = 16 // Bad OPT Version [RFC2671] + DNSResponseCodeBadSig DNSResponseCode = 16 // TSIG Signature Failure [RFC2845] + DNSResponseCodeBadKey DNSResponseCode = 17 // Key not recognized [RFC2845] + DNSResponseCodeBadTime DNSResponseCode = 18 // Signature out of time window [RFC2845] + DNSResponseCodeBadMode DNSResponseCode = 19 // Bad TKEY Mode [RFC2930] + DNSResponseCodeBadName DNSResponseCode = 20 // Duplicate key name [RFC2930] + DNSResponseCodeBadAlg DNSResponseCode = 21 // Algorithm not supported [RFC2930] + DNSResponseCodeBadTruc DNSResponseCode = 22 // Bad Truncation [RFC4635] + DNSResponseCodeBadCookie DNSResponseCode = 23 // Bad/missing Server Cookie [RFC7873] +) + +func (drc DNSResponseCode) String() string { + switch drc { + default: + return "Unknown" + case DNSResponseCodeNoErr: + return "No Error" + case DNSResponseCodeFormErr: + return "Format Error" + case DNSResponseCodeServFail: + return "Server Failure " + case DNSResponseCodeNXDomain: + return "Non-Existent Domain" + case DNSResponseCodeNotImp: + return "Not Implemented" + case DNSResponseCodeRefused: + return "Query Refused" + case DNSResponseCodeYXDomain: + return "Name Exists when it should not" + case DNSResponseCodeYXRRSet: + return "RR Set Exists when it should not" + case DNSResponseCodeNXRRSet: + return "RR Set that should exist does not" + case DNSResponseCodeNotAuth: + return "Server Not Authoritative for zone" + case DNSResponseCodeNotZone: + return "Name not contained in zone" + case DNSResponseCodeBadVers: + return "Bad OPT Version" + case DNSResponseCodeBadKey: + return "Key not recognized" + case DNSResponseCodeBadTime: + return "Signature out of time window" + case DNSResponseCodeBadMode: + return "Bad TKEY Mode" + case DNSResponseCodeBadName: + return "Duplicate key name" + case DNSResponseCodeBadAlg: + return "Algorithm not supported" + case DNSResponseCodeBadTruc: + return "Bad Truncation" + case DNSResponseCodeBadCookie: + return "Bad Cookie" + } +} + +// DNSOpCode defines a set of different operation types. +type DNSOpCode uint8 + +// DNSOpCode known values. 
+const ( + DNSOpCodeQuery DNSOpCode = 0 // Query [RFC1035] + DNSOpCodeIQuery DNSOpCode = 1 // Inverse Query Obsolete [RFC3425] + DNSOpCodeStatus DNSOpCode = 2 // Status [RFC1035] + DNSOpCodeNotify DNSOpCode = 4 // Notify [RFC1996] + DNSOpCodeUpdate DNSOpCode = 5 // Update [RFC2136] +) + +func (doc DNSOpCode) String() string { + switch doc { + default: + return "Unknown" + case DNSOpCodeQuery: + return "Query" + case DNSOpCodeIQuery: + return "Inverse Query" + case DNSOpCodeStatus: + return "Status" + case DNSOpCodeNotify: + return "Notify" + case DNSOpCodeUpdate: + return "Update" + } +} + +// DNS is specified in RFC 1034 / RFC 1035 +// +---------------------+ +// | Header | +// +---------------------+ +// | Question | the question for the name server +// +---------------------+ +// | Answer | RRs answering the question +// +---------------------+ +// | Authority | RRs pointing toward an authority +// +---------------------+ +// | Additional | RRs holding additional information +// +---------------------+ +// +// DNS Header +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | ID | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// |QR| Opcode |AA|TC|RD|RA| Z | RCODE | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | QDCOUNT | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | ANCOUNT | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | NSCOUNT | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | ARCOUNT | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + +// DNS contains data from a single Domain Name Service packet. +type DNS struct { + BaseLayer + + // Header fields + ID uint16 + QR bool + OpCode DNSOpCode + + AA bool // Authoritative answer + TC bool // Truncated + RD bool // Recursion desired + RA bool // Recursion available + Z uint8 // Reserved for future use + + ResponseCode DNSResponseCode + QDCount uint16 // Number of questions to expect + ANCount uint16 // Number of answers to expect + NSCount uint16 // Number of authorities to expect + ARCount uint16 // Number of additional records to expect + + // Entries + Questions []DNSQuestion + Answers []DNSResourceRecord + Authorities []DNSResourceRecord + Additionals []DNSResourceRecord + + // buffer for doing name decoding. We use a single reusable buffer to avoid + // name decoding on a single object via multiple DecodeFromBytes calls + // requiring constant allocation of small byte slices. + buffer []byte +} + +// LayerType returns gopacket.LayerTypeDNS. +func (d *DNS) LayerType() gopacket.LayerType { return LayerTypeDNS } + +// decodeDNS decodes the byte slice into a DNS type. It also +// setups the application Layer in PacketBuilder. +func decodeDNS(data []byte, p gopacket.PacketBuilder) error { + d := &DNS{} + err := d.DecodeFromBytes(data, p) + if err != nil { + return err + } + p.AddLayer(d) + p.SetApplicationLayer(d) + return nil +} + +// DecodeFromBytes decodes the slice into the DNS struct. 
+func (d *DNS) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + d.buffer = d.buffer[:0] + + if len(data) < 12 { + df.SetTruncated() + return errDNSPacketTooShort + } + + // since there are no further layers, the baselayer's content is + // pointing to this layer + d.BaseLayer = BaseLayer{Contents: data[:len(data)]} + d.ID = binary.BigEndian.Uint16(data[:2]) + d.QR = data[2]&0x80 != 0 + d.OpCode = DNSOpCode(data[2]>>3) & 0x0F + d.AA = data[2]&0x04 != 0 + d.TC = data[2]&0x02 != 0 + d.RD = data[2]&0x01 != 0 + d.RA = data[3]&0x80 != 0 + d.Z = uint8(data[3]>>4) & 0x7 + d.ResponseCode = DNSResponseCode(data[3] & 0xF) + d.QDCount = binary.BigEndian.Uint16(data[4:6]) + d.ANCount = binary.BigEndian.Uint16(data[6:8]) + d.NSCount = binary.BigEndian.Uint16(data[8:10]) + d.ARCount = binary.BigEndian.Uint16(data[10:12]) + + d.Questions = d.Questions[:0] + d.Answers = d.Answers[:0] + d.Authorities = d.Authorities[:0] + d.Additionals = d.Additionals[:0] + + offset := 12 + var err error + for i := 0; i < int(d.QDCount); i++ { + var q DNSQuestion + if offset, err = q.decode(data, offset, df, &d.buffer); err != nil { + return err + } + d.Questions = append(d.Questions, q) + } + + // For some horrible reason, if we do the obvious thing in this loop: + // var r DNSResourceRecord + // if blah := r.decode(blah); err != nil { + // return err + // } + // d.Foo = append(d.Foo, r) + // the Go compiler thinks that 'r' escapes to the heap, causing a malloc for + // every Answer, Authority, and Additional. To get around this, we do + // something really silly: we append an empty resource record to our slice, + // then use the last value in the slice to call decode. Since the value is + // already in the slice, there's no WAY it can escape... on the other hand our + // code is MUCH uglier :( + for i := 0; i < int(d.ANCount); i++ { + d.Answers = append(d.Answers, DNSResourceRecord{}) + if offset, err = d.Answers[i].decode(data, offset, df, &d.buffer); err != nil { + d.Answers = d.Answers[:i] // strip off erroneous value + return err + } + } + for i := 0; i < int(d.NSCount); i++ { + d.Authorities = append(d.Authorities, DNSResourceRecord{}) + if offset, err = d.Authorities[i].decode(data, offset, df, &d.buffer); err != nil { + d.Authorities = d.Authorities[:i] // strip off erroneous value + return err + } + } + for i := 0; i < int(d.ARCount); i++ { + d.Additionals = append(d.Additionals, DNSResourceRecord{}) + if offset, err = d.Additionals[i].decode(data, offset, df, &d.buffer); err != nil { + d.Additionals = d.Additionals[:i] // strip off erroneous value + return err + } + // extract extended RCODE from OPT RRs, RFC 6891 section 6.1.3 + if d.Additionals[i].Type == DNSTypeOPT { + d.ResponseCode = DNSResponseCode(uint8(d.ResponseCode) | uint8(d.Additionals[i].TTL>>20&0xF0)) + } + } + + if uint16(len(d.Questions)) != d.QDCount { + return errDecodeQueryBadQDCount + } else if uint16(len(d.Answers)) != d.ANCount { + return errDecodeQueryBadANCount + } else if uint16(len(d.Authorities)) != d.NSCount { + return errDecodeQueryBadNSCount + } else if uint16(len(d.Additionals)) != d.ARCount { + return errDecodeQueryBadARCount + } + return nil +} + +// CanDecode implements gopacket.DecodingLayer. +func (d *DNS) CanDecode() gopacket.LayerClass { + return LayerTypeDNS +} + +// NextLayerType implements gopacket.DecodingLayer. +func (d *DNS) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypePayload +} + +// Payload returns nil. 
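A minimal sketch of driving this decoder through the public gopacket API; the query bytes for "go.dev" below are hand-assembled for illustration and are not taken from the vendored file:

    package main

    import (
        "fmt"

        "github.com/google/gopacket"
        "github.com/google/gopacket/layers"
    )

    func main() {
        // A hand-built DNS query for go.dev, type A, class IN.
        raw := []byte{
            0x12, 0x34, // ID
            0x01, 0x00, // flags: RD set
            0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // QD=1, AN=NS=AR=0
            0x02, 'g', 'o', 0x03, 'd', 'e', 'v', 0x00, // QNAME go.dev
            0x00, 0x01, 0x00, 0x01, // QTYPE A, QCLASS IN
        }

        pkt := gopacket.NewPacket(raw, layers.LayerTypeDNS, gopacket.Default)
        if l := pkt.Layer(layers.LayerTypeDNS); l != nil {
            dns := l.(*layers.DNS)
            for _, q := range dns.Questions {
                fmt.Printf("id=%#x question=%s type=%v class=%v\n", dns.ID, q.Name, q.Type, q.Class)
            }
        }
    }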
+func (d *DNS) Payload() []byte { + return nil +} + +func b2i(b bool) int { + if b { + return 1 + } + return 0 +} + +func recSize(rr *DNSResourceRecord) int { + switch rr.Type { + case DNSTypeA: + return 4 + case DNSTypeAAAA: + return 16 + case DNSTypeNS: + return len(rr.NS) + 2 + case DNSTypeCNAME: + return len(rr.CNAME) + 2 + case DNSTypePTR: + return len(rr.PTR) + 2 + case DNSTypeSOA: + return len(rr.SOA.MName) + 2 + len(rr.SOA.RName) + 2 + 20 + case DNSTypeMX: + return 2 + len(rr.MX.Name) + 2 + case DNSTypeTXT: + l := len(rr.TXTs) + for _, txt := range rr.TXTs { + l += len(txt) + } + return l + case DNSTypeSRV: + return 6 + len(rr.SRV.Name) + 2 + case DNSTypeURI: + return 4 + len(rr.URI.Target) + case DNSTypeOPT: + l := len(rr.OPT) * 4 + for _, opt := range rr.OPT { + l += len(opt.Data) + } + return l + } + + return 0 +} + +func computeSize(recs []DNSResourceRecord) int { + sz := 0 + for _, rr := range recs { + v := len(rr.Name) + + if v == 0 { + sz += v + 11 + } else { + sz += v + 12 + } + + sz += recSize(&rr) + } + return sz +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +func (d *DNS) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + dsz := 0 + for _, q := range d.Questions { + dsz += len(q.Name) + 6 + } + dsz += computeSize(d.Answers) + dsz += computeSize(d.Authorities) + dsz += computeSize(d.Additionals) + + bytes, err := b.PrependBytes(12 + dsz) + if err != nil { + return err + } + binary.BigEndian.PutUint16(bytes, d.ID) + bytes[2] = byte((b2i(d.QR) << 7) | (int(d.OpCode) << 3) | (b2i(d.AA) << 2) | (b2i(d.TC) << 1) | b2i(d.RD)) + bytes[3] = byte((b2i(d.RA) << 7) | (int(d.Z) << 4) | int(d.ResponseCode)) + + if opts.FixLengths { + d.QDCount = uint16(len(d.Questions)) + d.ANCount = uint16(len(d.Answers)) + d.NSCount = uint16(len(d.Authorities)) + d.ARCount = uint16(len(d.Additionals)) + } + binary.BigEndian.PutUint16(bytes[4:], d.QDCount) + binary.BigEndian.PutUint16(bytes[6:], d.ANCount) + binary.BigEndian.PutUint16(bytes[8:], d.NSCount) + binary.BigEndian.PutUint16(bytes[10:], d.ARCount) + + off := 12 + for _, qd := range d.Questions { + n := qd.encode(bytes, off) + off += n + } + + for i := range d.Answers { + // done this way so we can modify DNSResourceRecord to fix + // lengths if requested + qa := &d.Answers[i] + n, err := qa.encode(bytes, off, opts) + if err != nil { + return err + } + off += n + } + + for i := range d.Authorities { + qa := &d.Authorities[i] + n, err := qa.encode(bytes, off, opts) + if err != nil { + return err + } + off += n + } + for i := range d.Additionals { + qa := &d.Additionals[i] + n, err := qa.encode(bytes, off, opts) + if err != nil { + return err + } + off += n + } + + return nil +} + +const maxRecursionLevel = 255 + +func decodeName(data []byte, offset int, buffer *[]byte, level int) ([]byte, int, error) { + if level > maxRecursionLevel { + return nil, 0, errMaxRecursion + } else if offset >= len(data) { + return nil, 0, errDNSNameOffsetTooHigh + } else if offset < 0 { + return nil, 0, errDNSNameOffsetNegative + } + start := len(*buffer) + index := offset + if data[index] == 0x00 { + return nil, index + 1, nil + } +loop: + for data[index] != 0x00 { + switch data[index] & 0xc0 { + default: + /* RFC 1035 + A domain name represented as a sequence of labels, where + each label consists of a length octet followed by that + number of octets. The domain name terminates with the + zero length octet for the null label of the root. 
Note + that this field may be an odd number of octets; no + padding is used. + */ + index2 := index + int(data[index]) + 1 + if index2-offset > 255 { + return nil, 0, errDNSNameTooLong + } else if index2 < index+1 || index2 > len(data) { + return nil, 0, errDNSNameInvalidIndex + } + *buffer = append(*buffer, '.') + *buffer = append(*buffer, data[index+1:index2]...) + index = index2 + + case 0xc0: + /* RFC 1035 + The pointer takes the form of a two octet sequence. + + The first two bits are ones. This allows a pointer to + be distinguished from a label, since the label must + begin with two zero bits because labels are restricted + to 63 octets or less. (The 10 and 01 combinations are + reserved for future use.) The OFFSET field specifies + an offset from the start of the message (i.e., the + first octet of the ID field in the domain header). A + zero offset specifies the first byte of the ID field, + etc. + + The compression scheme allows a domain name in a message to be + represented as either: + - a sequence of labels ending in a zero octet + - a pointer + - a sequence of labels ending with a pointer + */ + if index+2 > len(data) { + return nil, 0, errDNSPointerOffsetTooHigh + } + offsetp := int(binary.BigEndian.Uint16(data[index:index+2]) & 0x3fff) + if offsetp > len(data) { + return nil, 0, errDNSPointerOffsetTooHigh + } + // This looks a little tricky, but actually isn't. Because of how + // decodeName is written, calling it appends the decoded name to the + // current buffer. We already have the start of the buffer, then, so + // once this call is done buffer[start:] will contain our full name. + _, _, err := decodeName(data, offsetp, buffer, level+1) + if err != nil { + return nil, 0, err + } + index++ // pointer is two bytes, so add an extra byte here. + break loop + /* EDNS, or other DNS option ? */ + case 0x40: // RFC 2673 + return nil, 0, fmt.Errorf("qname '0x40' - RFC 2673 unsupported yet (data=%x index=%d)", + data[index], index) + + case 0x80: + return nil, 0, fmt.Errorf("qname '0x80' unsupported yet (data=%x index=%d)", + data[index], index) + } + if index >= len(data) { + return nil, 0, errDNSIndexOutOfRange + } + } + if len(*buffer) <= start { + return (*buffer)[start:], index + 1, nil + } + return (*buffer)[start+1:], index + 1, nil +} + +// DNSQuestion wraps a single request (question) within a DNS query. 
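decodeName itself is unexported, but its compression handling is easy to observe end to end: a response whose answer name is only the two-byte pointer 0xc0 0x0c comes back fully expanded. A hedged sketch under made-up data (the message bytes and the 192.0.2.1 address are illustrative):

    package main

    import (
        "fmt"

        "github.com/google/gopacket"
        "github.com/google/gopacket/layers"
    )

    func main() {
        raw := []byte{
            0xab, 0xcd, 0x81, 0x80, // ID, flags: QR|RD|RA, NOERROR
            0x00, 0x01, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, // QD=1, AN=1
            0x02, 'g', 'o', 0x03, 'd', 'e', 'v', 0x00, // question name at offset 12
            0x00, 0x01, 0x00, 0x01, // A, IN
            0xc0, 0x0c, // answer name: compression pointer back to offset 12
            0x00, 0x01, 0x00, 0x01, // A, IN
            0x00, 0x00, 0x01, 0x2c, // TTL 300
            0x00, 0x04, 192, 0, 2, 1, // RDLENGTH 4, RDATA 192.0.2.1
        }

        var dns layers.DNS
        if err := dns.DecodeFromBytes(raw, gopacket.NilDecodeFeedback); err != nil {
            panic(err)
        }
        a := dns.Answers[0]
        fmt.Printf("%s %v -> %s\n", a.Name, a.Type, a.IP) // go.dev A -> 192.0.2.1
    }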
+type DNSQuestion struct { + Name []byte + Type DNSType + Class DNSClass +} + +func (q *DNSQuestion) decode(data []byte, offset int, df gopacket.DecodeFeedback, buffer *[]byte) (int, error) { + name, endq, err := decodeName(data, offset, buffer, 1) + if err != nil { + return 0, err + } + + q.Name = name + q.Type = DNSType(binary.BigEndian.Uint16(data[endq : endq+2])) + q.Class = DNSClass(binary.BigEndian.Uint16(data[endq+2 : endq+4])) + + return endq + 4, nil +} + +func (q *DNSQuestion) encode(data []byte, offset int) int { + noff := encodeName(q.Name, data, offset) + nSz := noff - offset + binary.BigEndian.PutUint16(data[noff:], uint16(q.Type)) + binary.BigEndian.PutUint16(data[noff+2:], uint16(q.Class)) + return nSz + 4 +} + +// DNSResourceRecord +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | | +// / / +// / NAME / +// | | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | TYPE | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | CLASS | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | TTL | +// | | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | RDLENGTH | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--| +// / RDATA / +// / / +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + +// DNSResourceRecord wraps the data from a single DNS resource within a +// response. +type DNSResourceRecord struct { + // Header + Name []byte + Type DNSType + Class DNSClass + TTL uint32 + + // RDATA Raw Values + DataLength uint16 + Data []byte + + // RDATA Decoded Values + IP net.IP + NS, CNAME, PTR []byte + TXTs [][]byte + SOA DNSSOA + SRV DNSSRV + MX DNSMX + OPT []DNSOPT // See RFC 6891, section 6.1.2 + URI DNSURI + + // Undecoded TXT for backward compatibility + TXT []byte +} + +// decode decodes the resource record, returning the total length of the record. +func (rr *DNSResourceRecord) decode(data []byte, offset int, df gopacket.DecodeFeedback, buffer *[]byte) (int, error) { + name, endq, err := decodeName(data, offset, buffer, 1) + if err != nil { + return 0, err + } + + rr.Name = name + rr.Type = DNSType(binary.BigEndian.Uint16(data[endq : endq+2])) + rr.Class = DNSClass(binary.BigEndian.Uint16(data[endq+2 : endq+4])) + rr.TTL = binary.BigEndian.Uint32(data[endq+4 : endq+8]) + rr.DataLength = binary.BigEndian.Uint16(data[endq+8 : endq+10]) + end := endq + 10 + int(rr.DataLength) + if end > len(data) { + return 0, errDecodeRecordLength + } + rr.Data = data[endq+10 : end] + + if err = rr.decodeRData(data[:end], endq+10, buffer); err != nil { + return 0, err + } + + return endq + 10 + int(rr.DataLength), nil +} + +func encodeName(name []byte, data []byte, offset int) int { + l := 0 + for i := range name { + if name[i] == '.' 
{ + data[offset+i-l] = byte(l) + l = 0 + } else { + // skip one to write the length + data[offset+i+1] = name[i] + l++ + } + } + + if len(name) == 0 { + data[offset] = 0x00 // terminal + return offset + 1 + } + + // length for final portion + data[offset+len(name)-l] = byte(l) + data[offset+len(name)+1] = 0x00 // terminal + return offset + len(name) + 2 +} + +func (rr *DNSResourceRecord) encode(data []byte, offset int, opts gopacket.SerializeOptions) (int, error) { + + noff := encodeName(rr.Name, data, offset) + nSz := noff - offset + + binary.BigEndian.PutUint16(data[noff:], uint16(rr.Type)) + binary.BigEndian.PutUint16(data[noff+2:], uint16(rr.Class)) + binary.BigEndian.PutUint32(data[noff+4:], uint32(rr.TTL)) + + switch rr.Type { + case DNSTypeA: + copy(data[noff+10:], rr.IP.To4()) + case DNSTypeAAAA: + copy(data[noff+10:], rr.IP) + case DNSTypeNS: + encodeName(rr.NS, data, noff+10) + case DNSTypeCNAME: + encodeName(rr.CNAME, data, noff+10) + case DNSTypePTR: + encodeName(rr.PTR, data, noff+10) + case DNSTypeSOA: + noff2 := encodeName(rr.SOA.MName, data, noff+10) + noff2 = encodeName(rr.SOA.RName, data, noff2) + binary.BigEndian.PutUint32(data[noff2:], rr.SOA.Serial) + binary.BigEndian.PutUint32(data[noff2+4:], rr.SOA.Refresh) + binary.BigEndian.PutUint32(data[noff2+8:], rr.SOA.Retry) + binary.BigEndian.PutUint32(data[noff2+12:], rr.SOA.Expire) + binary.BigEndian.PutUint32(data[noff2+16:], rr.SOA.Minimum) + case DNSTypeMX: + binary.BigEndian.PutUint16(data[noff+10:], rr.MX.Preference) + encodeName(rr.MX.Name, data, noff+12) + case DNSTypeTXT: + noff2 := noff + 10 + for _, txt := range rr.TXTs { + data[noff2] = byte(len(txt)) + copy(data[noff2+1:], txt) + noff2 += 1 + len(txt) + } + case DNSTypeSRV: + binary.BigEndian.PutUint16(data[noff+10:], rr.SRV.Priority) + binary.BigEndian.PutUint16(data[noff+12:], rr.SRV.Weight) + binary.BigEndian.PutUint16(data[noff+14:], rr.SRV.Port) + encodeName(rr.SRV.Name, data, noff+16) + case DNSTypeURI: + binary.BigEndian.PutUint16(data[noff+10:], rr.URI.Priority) + binary.BigEndian.PutUint16(data[noff+12:], rr.URI.Weight) + copy(data[noff+14:], rr.URI.Target) + case DNSTypeOPT: + noff2 := noff + 10 + for _, opt := range rr.OPT { + binary.BigEndian.PutUint16(data[noff2:], uint16(opt.Code)) + binary.BigEndian.PutUint16(data[noff2+2:], uint16(len(opt.Data))) + copy(data[noff2+4:], opt.Data) + noff2 += 4 + len(opt.Data) + } + default: + return 0, fmt.Errorf("serializing resource record of type %v not supported", rr.Type) + } + + // DataLength + dSz := recSize(rr) + binary.BigEndian.PutUint16(data[noff+8:], uint16(dSz)) + + if opts.FixLengths { + rr.DataLength = uint16(dSz) + } + + return nSz + 10 + dSz, nil +} + +func (rr *DNSResourceRecord) String() string { + + if rr.Type == DNSTypeOPT { + opts := make([]string, len(rr.OPT)) + for i, opt := range rr.OPT { + opts[i] = opt.String() + } + return "OPT " + strings.Join(opts, ",") + } + if rr.Type == DNSTypeURI { + return fmt.Sprintf("URI %d %d %s", rr.URI.Priority, rr.URI.Weight, string(rr.URI.Target)) + } + if rr.Class == DNSClassIN { + switch rr.Type { + case DNSTypeA, DNSTypeAAAA: + return rr.IP.String() + case DNSTypeNS: + return "NS " + string(rr.NS) + case DNSTypeCNAME: + return "CNAME " + string(rr.CNAME) + case DNSTypePTR: + return "PTR " + string(rr.PTR) + case DNSTypeTXT: + return "TXT " + string(rr.TXT) + } + } + + return fmt.Sprintf("<%v, %v>", rr.Class, rr.Type) +} + +func decodeCharacterStrings(data []byte) ([][]byte, error) { + strings := make([][]byte, 0, 1) + end := len(data) + for index, index2 
:= 0, 0; index != end; index = index2 { + index2 = index + 1 + int(data[index]) // index increases by 1..256 and does not overflow + if index2 > end { + return nil, errCharStringMissData + } + strings = append(strings, data[index+1:index2]) + } + return strings, nil +} + +func decodeOPTs(data []byte, offset int) ([]DNSOPT, error) { + allOPT := []DNSOPT{} + end := len(data) + + if offset == end { + return allOPT, nil // There is no data to read + } + + if offset+4 > end { + return allOPT, fmt.Errorf("DNSOPT record is of length %d, it should be at least length 4", end-offset) + } + + for i := offset; i < end; { + opt := DNSOPT{} + if len(data) < i+4 { + return allOPT, fmt.Errorf("Malformed DNSOPT record. Length %d < %d", len(data), i+4) + } + opt.Code = DNSOptionCode(binary.BigEndian.Uint16(data[i : i+2])) + l := binary.BigEndian.Uint16(data[i+2 : i+4]) + if i+4+int(l) > end { + return allOPT, fmt.Errorf("Malformed DNSOPT record. The length (%d) field implies a packet larger than the one received", l) + } + opt.Data = data[i+4 : i+4+int(l)] + allOPT = append(allOPT, opt) + i += int(l) + 4 + } + return allOPT, nil +} + +func (rr *DNSResourceRecord) decodeRData(data []byte, offset int, buffer *[]byte) error { + switch rr.Type { + case DNSTypeA: + rr.IP = rr.Data + case DNSTypeAAAA: + rr.IP = rr.Data + case DNSTypeTXT, DNSTypeHINFO: + rr.TXT = rr.Data + txts, err := decodeCharacterStrings(rr.Data) + if err != nil { + return err + } + rr.TXTs = txts + case DNSTypeNS: + name, _, err := decodeName(data, offset, buffer, 1) + if err != nil { + return err + } + rr.NS = name + case DNSTypeCNAME: + name, _, err := decodeName(data, offset, buffer, 1) + if err != nil { + return err + } + rr.CNAME = name + case DNSTypePTR: + name, _, err := decodeName(data, offset, buffer, 1) + if err != nil { + return err + } + rr.PTR = name + case DNSTypeSOA: + name, endq, err := decodeName(data, offset, buffer, 1) + if err != nil { + return err + } + rr.SOA.MName = name + name, endq, err = decodeName(data, endq, buffer, 1) + if err != nil { + return err + } + if len(data) < endq+20 { + return errors.New("SOA too small") + } + rr.SOA.RName = name + rr.SOA.Serial = binary.BigEndian.Uint32(data[endq : endq+4]) + rr.SOA.Refresh = binary.BigEndian.Uint32(data[endq+4 : endq+8]) + rr.SOA.Retry = binary.BigEndian.Uint32(data[endq+8 : endq+12]) + rr.SOA.Expire = binary.BigEndian.Uint32(data[endq+12 : endq+16]) + rr.SOA.Minimum = binary.BigEndian.Uint32(data[endq+16 : endq+20]) + case DNSTypeMX: + if len(data) < offset+2 { + return errors.New("MX too small") + } + rr.MX.Preference = binary.BigEndian.Uint16(data[offset : offset+2]) + name, _, err := decodeName(data, offset+2, buffer, 1) + if err != nil { + return err + } + rr.MX.Name = name + case DNSTypeURI: + if len(rr.Data) < 4 { + return errors.New("URI too small") + } + rr.URI.Priority = binary.BigEndian.Uint16(data[offset : offset+2]) + rr.URI.Weight = binary.BigEndian.Uint16(data[offset+2 : offset+4]) + rr.URI.Target = rr.Data[4:] + case DNSTypeSRV: + if len(data) < offset+6 { + return errors.New("SRV too small") + } + rr.SRV.Priority = binary.BigEndian.Uint16(data[offset : offset+2]) + rr.SRV.Weight = binary.BigEndian.Uint16(data[offset+2 : offset+4]) + rr.SRV.Port = binary.BigEndian.Uint16(data[offset+4 : offset+6]) + name, _, err := decodeName(data, offset+6, buffer, 1) + if err != nil { + return err + } + rr.SRV.Name = name + case DNSTypeOPT: + allOPT, err := decodeOPTs(data, offset) + if err != nil { + return err + } + rr.OPT = allOPT + } + return nil +} + +// DNSSOA 
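Serialization is the mirror image of the RDATA decoding above: fill in the decoded-value fields (IP, CNAME, SOA, and so on) rather than Data, and let FixLengths compute the section counts and RDLENGTH. A sketch under those assumptions (example.com and 192.0.2.1 are placeholder values, not anything from the vendored file):

    package main

    import (
        "fmt"
        "net"

        "github.com/google/gopacket"
        "github.com/google/gopacket/layers"
    )

    func main() {
        // A minimal response carrying one A record.
        dns := &layers.DNS{
            ID:           0x1234,
            QR:           true,
            OpCode:       layers.DNSOpCodeQuery,
            ResponseCode: layers.DNSResponseCodeNoErr,
            Questions: []layers.DNSQuestion{{
                Name:  []byte("example.com"),
                Type:  layers.DNSTypeA,
                Class: layers.DNSClassIN,
            }},
            Answers: []layers.DNSResourceRecord{{
                Name:  []byte("example.com"),
                Type:  layers.DNSTypeA,
                Class: layers.DNSClassIN,
                TTL:   300,
                IP:    net.ParseIP("192.0.2.1"),
            }},
        }

        buf := gopacket.NewSerializeBuffer()
        opts := gopacket.SerializeOptions{FixLengths: true} // derive counts and RDLENGTH
        if err := dns.SerializeTo(buf, opts); err != nil {
            panic(err)
        }
        fmt.Printf("%d bytes on the wire\n", len(buf.Bytes()))
    }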
is a Start of Authority record. Each domain requires a SOA record at +// the cutover where a domain is delegated from its parent. +type DNSSOA struct { + MName, RName []byte + Serial, Refresh, Retry, Expire, Minimum uint32 +} + +// DNSSRV is a Service record, defining a location (hostname/port) of a +// server/service. +type DNSSRV struct { + Priority, Weight, Port uint16 + Name []byte +} + +// DNSMX is a mail exchange record, defining a mail server for a recipient's +// domain. +type DNSMX struct { + Preference uint16 + Name []byte +} + +// DNSURI is a URI record, defining a target (URI) of a server/service +type DNSURI struct { + Priority, Weight uint16 + Target []byte +} + +// DNSOptionCode represents the code of a DNS Option, see RFC6891, section 6.1.2 +type DNSOptionCode uint16 + +func (doc DNSOptionCode) String() string { + switch doc { + default: + return "Unknown" + case DNSOptionCodeNSID: + return "NSID" + case DNSOptionCodeDAU: + return "DAU" + case DNSOptionCodeDHU: + return "DHU" + case DNSOptionCodeN3U: + return "N3U" + case DNSOptionCodeEDNSClientSubnet: + return "EDNSClientSubnet" + case DNSOptionCodeEDNSExpire: + return "EDNSExpire" + case DNSOptionCodeCookie: + return "Cookie" + case DNSOptionCodeEDNSKeepAlive: + return "EDNSKeepAlive" + case DNSOptionCodePadding: + return "CodePadding" + case DNSOptionCodeChain: + return "CodeChain" + case DNSOptionCodeEDNSKeyTag: + return "CodeEDNSKeyTag" + case DNSOptionCodeEDNSClientTag: + return "EDNSClientTag" + case DNSOptionCodeEDNSServerTag: + return "EDNSServerTag" + case DNSOptionCodeDeviceID: + return "DeviceID" + } +} + +// DNSOptionCode known values. See IANA +const ( + DNSOptionCodeNSID DNSOptionCode = 3 + DNSOptionCodeDAU DNSOptionCode = 5 + DNSOptionCodeDHU DNSOptionCode = 6 + DNSOptionCodeN3U DNSOptionCode = 7 + DNSOptionCodeEDNSClientSubnet DNSOptionCode = 8 + DNSOptionCodeEDNSExpire DNSOptionCode = 9 + DNSOptionCodeCookie DNSOptionCode = 10 + DNSOptionCodeEDNSKeepAlive DNSOptionCode = 11 + DNSOptionCodePadding DNSOptionCode = 12 + DNSOptionCodeChain DNSOptionCode = 13 + DNSOptionCodeEDNSKeyTag DNSOptionCode = 14 + DNSOptionCodeEDNSClientTag DNSOptionCode = 16 + DNSOptionCodeEDNSServerTag DNSOptionCode = 17 + DNSOptionCodeDeviceID DNSOptionCode = 26946 +) + +// DNSOPT is a DNS Option, see RFC6891, section 6.1.2 +type DNSOPT struct { + Code DNSOptionCode + Data []byte +} + +func (opt DNSOPT) String() string { + return fmt.Sprintf("%s=%x", opt.Code, opt.Data) +} + +var ( + errMaxRecursion = errors.New("max DNS recursion level hit") + + errDNSNameOffsetTooHigh = errors.New("dns name offset too high") + errDNSNameOffsetNegative = errors.New("dns name offset is negative") + errDNSPacketTooShort = errors.New("DNS packet too short") + errDNSNameTooLong = errors.New("dns name is too long") + errDNSNameInvalidIndex = errors.New("dns name uncomputable: invalid index") + errDNSPointerOffsetTooHigh = errors.New("dns offset pointer too high") + errDNSIndexOutOfRange = errors.New("dns index walked out of range") + errDNSNameHasNoData = errors.New("no dns data found for name") + + errCharStringMissData = errors.New("Insufficient data for a ") + + errDecodeRecordLength = errors.New("resource record length exceeds data") + + errDecodeQueryBadQDCount = errors.New("Invalid query decoding, not the right number of questions") + errDecodeQueryBadANCount = errors.New("Invalid query decoding, not the right number of answers") + errDecodeQueryBadNSCount = errors.New("Invalid query decoding, not the right number of authorities") + 
errDecodeQueryBadARCount = errors.New("Invalid query decoding, not the right number of additionals info") +) diff --git a/vendor/github.com/google/gopacket/layers/doc.go b/vendor/github.com/google/gopacket/layers/doc.go new file mode 100644 index 0000000000..3c882c3fa0 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/doc.go @@ -0,0 +1,61 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +/* +Package layers provides decoding layers for many common protocols. + +The layers package contains decode implementations for a number of different +types of packet layers. Users of gopacket will almost always want to also use +layers to actually decode packet data into useful pieces. To see the set of +protocols that gopacket/layers is currently able to decode, +look at the set of LayerTypes defined in the Variables sections. The +layers package also defines endpoints for many of the common packet layers +that have source/destination addresses associated with them, for example IPv4/6 +(IPs) and TCP/UDP (ports). +Finally, layers contains a number of useful enumerations (IPProtocol, +EthernetType, LinkType, PPPType, etc...). Many of these implement the +gopacket.Decoder interface, so they can be passed into gopacket as decoders. + +Most common protocol layers are named using acronyms or other industry-common +names (IPv4, TCP, PPP). Some of the less common ones have their names expanded +(CiscoDiscoveryProtocol). +For certain protocols, sub-parts of the protocol are split out into their own +layers (SCTP, for example). This is done mostly in cases where portions of the +protocol may fulfill the capabilities of interesting layers (SCTPData implements +ApplicationLayer, while base SCTP implements TransportLayer), or possibly +because splitting a protocol into a few layers makes decoding easier. + +This package is meant to be used with its parent, +http://github.com/google/gopacket. + +Port Types + +Instead of using raw uint16 or uint8 values for ports, we use a different port +type for every protocol, for example TCPPort and UDPPort. This allows us to +override string behavior for each port, which we do by setting up port name +maps (TCPPortNames, UDPPortNames, etc...). Well-known ports are annotated with +their protocol names, and their String function displays these names: + + p := TCPPort(80) + fmt.Printf("Number: %d String: %v", p, p) + // Prints: "Number: 80 String: 80(http)" + +Modifying Decode Behavior + +layers links together decoding through its enumerations. For example, after +decoding layer type Ethernet, it uses Ethernet.EthernetType as its next decoder. +All enumerations that act as decoders, like EthernetType, can be modified by +users depending on their preferences. For example, if you have a spiffy new +IPv4 decoder that works way better than the one built into layers, you can do +this: + + var mySpiffyIPv4Decoder gopacket.Decoder = ... + layers.EthernetTypeMetadata[EthernetTypeIPv4].DecodeWith = mySpiffyIPv4Decoder + +This will make all future ethernet packets use your new decoder to decode IPv4 +packets, instead of the built-in decoder used by gopacket. 
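The port-name behaviour described in the package documentation can be seen in a few lines; this is just an illustrative use of the documented TCPPort type, not an addition to the package:

    package main

    import (
        "fmt"

        "github.com/google/gopacket/layers"
    )

    func main() {
        p := layers.TCPPort(80)
        fmt.Printf("Number: %d String: %v\n", p, p)
        // Per the package documentation this prints: Number: 80 String: 80(http)
    }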
+*/ +package layers diff --git a/vendor/github.com/google/gopacket/layers/dot11.go b/vendor/github.com/google/gopacket/layers/dot11.go new file mode 100644 index 0000000000..3e64910610 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/dot11.go @@ -0,0 +1,2118 @@ +// Copyright 2014 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +// See http://standards.ieee.org/findstds/standard/802.11-2012.html for info on +// all of the layers in this file. + +package layers + +import ( + "bytes" + "encoding/binary" + "fmt" + "hash/crc32" + "net" + + "github.com/google/gopacket" +) + +// Dot11Flags contains the set of 8 flags in the IEEE 802.11 frame control +// header, all in one place. +type Dot11Flags uint8 + +const ( + Dot11FlagsToDS Dot11Flags = 1 << iota + Dot11FlagsFromDS + Dot11FlagsMF + Dot11FlagsRetry + Dot11FlagsPowerManagement + Dot11FlagsMD + Dot11FlagsWEP + Dot11FlagsOrder +) + +func (d Dot11Flags) ToDS() bool { + return d&Dot11FlagsToDS != 0 +} +func (d Dot11Flags) FromDS() bool { + return d&Dot11FlagsFromDS != 0 +} +func (d Dot11Flags) MF() bool { + return d&Dot11FlagsMF != 0 +} +func (d Dot11Flags) Retry() bool { + return d&Dot11FlagsRetry != 0 +} +func (d Dot11Flags) PowerManagement() bool { + return d&Dot11FlagsPowerManagement != 0 +} +func (d Dot11Flags) MD() bool { + return d&Dot11FlagsMD != 0 +} +func (d Dot11Flags) WEP() bool { + return d&Dot11FlagsWEP != 0 +} +func (d Dot11Flags) Order() bool { + return d&Dot11FlagsOrder != 0 +} + +// String provides a human readable string for Dot11Flags. +// This string is possibly subject to change over time; if you're storing this +// persistently, you should probably store the Dot11Flags value, not its string. +func (a Dot11Flags) String() string { + var out bytes.Buffer + if a.ToDS() { + out.WriteString("TO-DS,") + } + if a.FromDS() { + out.WriteString("FROM-DS,") + } + if a.MF() { + out.WriteString("MF,") + } + if a.Retry() { + out.WriteString("Retry,") + } + if a.PowerManagement() { + out.WriteString("PowerManagement,") + } + if a.MD() { + out.WriteString("MD,") + } + if a.WEP() { + out.WriteString("WEP,") + } + if a.Order() { + out.WriteString("Order,") + } + + if length := out.Len(); length > 0 { + return string(out.Bytes()[:length-1]) // strip final comma + } + return "" +} + +type Dot11Reason uint16 + +// TODO: Verify these reasons, and append more reasons if necessary. + +const ( + Dot11ReasonReserved Dot11Reason = 1 + Dot11ReasonUnspecified Dot11Reason = 2 + Dot11ReasonAuthExpired Dot11Reason = 3 + Dot11ReasonDeauthStLeaving Dot11Reason = 4 + Dot11ReasonInactivity Dot11Reason = 5 + Dot11ReasonApFull Dot11Reason = 6 + Dot11ReasonClass2FromNonAuth Dot11Reason = 7 + Dot11ReasonClass3FromNonAss Dot11Reason = 8 + Dot11ReasonDisasStLeaving Dot11Reason = 9 + Dot11ReasonStNotAuth Dot11Reason = 10 +) + +// String provides a human readable string for Dot11Reason. +// This string is possibly subject to change over time; if you're storing this +// persistently, you should probably store the Dot11Reason value, not its string. +func (a Dot11Reason) String() string { + switch a { + case Dot11ReasonReserved: + return "Reserved" + case Dot11ReasonUnspecified: + return "Unspecified" + case Dot11ReasonAuthExpired: + return "Auth. expired" + case Dot11ReasonDeauthStLeaving: + return "Deauth. st. leaving" + case Dot11ReasonInactivity: + return "Inactivity" + case Dot11ReasonApFull: + return "Ap. 
full" + case Dot11ReasonClass2FromNonAuth: + return "Class2 from non auth." + case Dot11ReasonClass3FromNonAss: + return "Class3 from non ass." + case Dot11ReasonDisasStLeaving: + return "Disass st. leaving" + case Dot11ReasonStNotAuth: + return "St. not auth." + default: + return "Unknown reason" + } +} + +type Dot11Status uint16 + +const ( + Dot11StatusSuccess Dot11Status = 0 + Dot11StatusFailure Dot11Status = 1 // Unspecified failure + Dot11StatusCannotSupportAllCapabilities Dot11Status = 10 // Cannot support all requested capabilities in the Capability Information field + Dot11StatusInabilityExistsAssociation Dot11Status = 11 // Reassociation denied due to inability to confirm that association exists + Dot11StatusAssociationDenied Dot11Status = 12 // Association denied due to reason outside the scope of this standard + Dot11StatusAlgorithmUnsupported Dot11Status = 13 // Responding station does not support the specified authentication algorithm + Dot11StatusOufOfExpectedSequence Dot11Status = 14 // Received an Authentication frame with authentication transaction sequence number out of expected sequence + Dot11StatusChallengeFailure Dot11Status = 15 // Authentication rejected because of challenge failure + Dot11StatusTimeout Dot11Status = 16 // Authentication rejected due to timeout waiting for next frame in sequence + Dot11StatusAPUnableToHandle Dot11Status = 17 // Association denied because AP is unable to handle additional associated stations + Dot11StatusRateUnsupported Dot11Status = 18 // Association denied due to requesting station not supporting all of the data rates in the BSSBasicRateSet parameter +) + +// String provides a human readable string for Dot11Status. +// This string is possibly subject to change over time; if you're storing this +// persistently, you should probably store the Dot11Status value, not its string. +func (a Dot11Status) String() string { + switch a { + case Dot11StatusSuccess: + return "success" + case Dot11StatusFailure: + return "failure" + case Dot11StatusCannotSupportAllCapabilities: + return "cannot-support-all-capabilities" + case Dot11StatusInabilityExistsAssociation: + return "inability-exists-association" + case Dot11StatusAssociationDenied: + return "association-denied" + case Dot11StatusAlgorithmUnsupported: + return "algorithm-unsupported" + case Dot11StatusOufOfExpectedSequence: + return "out-of-expected-sequence" + case Dot11StatusChallengeFailure: + return "challenge-failure" + case Dot11StatusTimeout: + return "timeout" + case Dot11StatusAPUnableToHandle: + return "ap-unable-to-handle" + case Dot11StatusRateUnsupported: + return "rate-unsupported" + default: + return "unknown status" + } +} + +type Dot11AckPolicy uint8 + +const ( + Dot11AckPolicyNormal Dot11AckPolicy = 0 + Dot11AckPolicyNone Dot11AckPolicy = 1 + Dot11AckPolicyNoExplicit Dot11AckPolicy = 2 + Dot11AckPolicyBlock Dot11AckPolicy = 3 +) + +// String provides a human readable string for Dot11AckPolicy. +// This string is possibly subject to change over time; if you're storing this +// persistently, you should probably store the Dot11AckPolicy value, not its string. 
+func (a Dot11AckPolicy) String() string { + switch a { + case Dot11AckPolicyNormal: + return "normal-ack" + case Dot11AckPolicyNone: + return "no-ack" + case Dot11AckPolicyNoExplicit: + return "no-explicit-ack" + case Dot11AckPolicyBlock: + return "block-ack" + default: + return "unknown-ack-policy" + } +} + +type Dot11Algorithm uint16 + +const ( + Dot11AlgorithmOpen Dot11Algorithm = 0 + Dot11AlgorithmSharedKey Dot11Algorithm = 1 +) + +// String provides a human readable string for Dot11Algorithm. +// This string is possibly subject to change over time; if you're storing this +// persistently, you should probably store the Dot11Algorithm value, not its string. +func (a Dot11Algorithm) String() string { + switch a { + case Dot11AlgorithmOpen: + return "open" + case Dot11AlgorithmSharedKey: + return "shared-key" + default: + return "unknown-algorithm" + } +} + +type Dot11InformationElementID uint8 + +const ( + Dot11InformationElementIDSSID Dot11InformationElementID = 0 + Dot11InformationElementIDRates Dot11InformationElementID = 1 + Dot11InformationElementIDFHSet Dot11InformationElementID = 2 + Dot11InformationElementIDDSSet Dot11InformationElementID = 3 + Dot11InformationElementIDCFSet Dot11InformationElementID = 4 + Dot11InformationElementIDTIM Dot11InformationElementID = 5 + Dot11InformationElementIDIBSSSet Dot11InformationElementID = 6 + Dot11InformationElementIDCountryInfo Dot11InformationElementID = 7 + Dot11InformationElementIDHoppingPatternParam Dot11InformationElementID = 8 + Dot11InformationElementIDHoppingPatternTable Dot11InformationElementID = 9 + Dot11InformationElementIDRequest Dot11InformationElementID = 10 + Dot11InformationElementIDQBSSLoadElem Dot11InformationElementID = 11 + Dot11InformationElementIDEDCAParamSet Dot11InformationElementID = 12 + Dot11InformationElementIDTrafficSpec Dot11InformationElementID = 13 + Dot11InformationElementIDTrafficClass Dot11InformationElementID = 14 + Dot11InformationElementIDSchedule Dot11InformationElementID = 15 + Dot11InformationElementIDChallenge Dot11InformationElementID = 16 + Dot11InformationElementIDPowerConst Dot11InformationElementID = 32 + Dot11InformationElementIDPowerCapability Dot11InformationElementID = 33 + Dot11InformationElementIDTPCRequest Dot11InformationElementID = 34 + Dot11InformationElementIDTPCReport Dot11InformationElementID = 35 + Dot11InformationElementIDSupportedChannels Dot11InformationElementID = 36 + Dot11InformationElementIDSwitchChannelAnnounce Dot11InformationElementID = 37 + Dot11InformationElementIDMeasureRequest Dot11InformationElementID = 38 + Dot11InformationElementIDMeasureReport Dot11InformationElementID = 39 + Dot11InformationElementIDQuiet Dot11InformationElementID = 40 + Dot11InformationElementIDIBSSDFS Dot11InformationElementID = 41 + Dot11InformationElementIDERPInfo Dot11InformationElementID = 42 + Dot11InformationElementIDTSDelay Dot11InformationElementID = 43 + Dot11InformationElementIDTCLASProcessing Dot11InformationElementID = 44 + Dot11InformationElementIDHTCapabilities Dot11InformationElementID = 45 + Dot11InformationElementIDQOSCapability Dot11InformationElementID = 46 + Dot11InformationElementIDERPInfo2 Dot11InformationElementID = 47 + Dot11InformationElementIDRSNInfo Dot11InformationElementID = 48 + Dot11InformationElementIDESRates Dot11InformationElementID = 50 + Dot11InformationElementIDAPChannelReport Dot11InformationElementID = 51 + Dot11InformationElementIDNeighborReport Dot11InformationElementID = 52 + Dot11InformationElementIDRCPI Dot11InformationElementID = 53 + 
Dot11InformationElementIDMobilityDomain Dot11InformationElementID = 54 + Dot11InformationElementIDFastBSSTrans Dot11InformationElementID = 55 + Dot11InformationElementIDTimeoutInt Dot11InformationElementID = 56 + Dot11InformationElementIDRICData Dot11InformationElementID = 57 + Dot11InformationElementIDDSERegisteredLoc Dot11InformationElementID = 58 + Dot11InformationElementIDSuppOperatingClass Dot11InformationElementID = 59 + Dot11InformationElementIDExtChanSwitchAnnounce Dot11InformationElementID = 60 + Dot11InformationElementIDHTInfo Dot11InformationElementID = 61 + Dot11InformationElementIDSecChanOffset Dot11InformationElementID = 62 + Dot11InformationElementIDBSSAverageAccessDelay Dot11InformationElementID = 63 + Dot11InformationElementIDAntenna Dot11InformationElementID = 64 + Dot11InformationElementIDRSNI Dot11InformationElementID = 65 + Dot11InformationElementIDMeasurePilotTrans Dot11InformationElementID = 66 + Dot11InformationElementIDBSSAvailAdmCapacity Dot11InformationElementID = 67 + Dot11InformationElementIDBSSACAccDelayWAPIParam Dot11InformationElementID = 68 + Dot11InformationElementIDTimeAdvertisement Dot11InformationElementID = 69 + Dot11InformationElementIDRMEnabledCapabilities Dot11InformationElementID = 70 + Dot11InformationElementIDMultipleBSSID Dot11InformationElementID = 71 + Dot11InformationElementID2040BSSCoExist Dot11InformationElementID = 72 + Dot11InformationElementID2040BSSIntChanReport Dot11InformationElementID = 73 + Dot11InformationElementIDOverlapBSSScanParam Dot11InformationElementID = 74 + Dot11InformationElementIDRICDescriptor Dot11InformationElementID = 75 + Dot11InformationElementIDManagementMIC Dot11InformationElementID = 76 + Dot11InformationElementIDEventRequest Dot11InformationElementID = 78 + Dot11InformationElementIDEventReport Dot11InformationElementID = 79 + Dot11InformationElementIDDiagnosticRequest Dot11InformationElementID = 80 + Dot11InformationElementIDDiagnosticReport Dot11InformationElementID = 81 + Dot11InformationElementIDLocationParam Dot11InformationElementID = 82 + Dot11InformationElementIDNonTransBSSIDCapability Dot11InformationElementID = 83 + Dot11InformationElementIDSSIDList Dot11InformationElementID = 84 + Dot11InformationElementIDMultipleBSSIDIndex Dot11InformationElementID = 85 + Dot11InformationElementIDFMSDescriptor Dot11InformationElementID = 86 + Dot11InformationElementIDFMSRequest Dot11InformationElementID = 87 + Dot11InformationElementIDFMSResponse Dot11InformationElementID = 88 + Dot11InformationElementIDQOSTrafficCapability Dot11InformationElementID = 89 + Dot11InformationElementIDBSSMaxIdlePeriod Dot11InformationElementID = 90 + Dot11InformationElementIDTFSRequest Dot11InformationElementID = 91 + Dot11InformationElementIDTFSResponse Dot11InformationElementID = 92 + Dot11InformationElementIDWNMSleepMode Dot11InformationElementID = 93 + Dot11InformationElementIDTIMBroadcastRequest Dot11InformationElementID = 94 + Dot11InformationElementIDTIMBroadcastResponse Dot11InformationElementID = 95 + Dot11InformationElementIDCollInterferenceReport Dot11InformationElementID = 96 + Dot11InformationElementIDChannelUsage Dot11InformationElementID = 97 + Dot11InformationElementIDTimeZone Dot11InformationElementID = 98 + Dot11InformationElementIDDMSRequest Dot11InformationElementID = 99 + Dot11InformationElementIDDMSResponse Dot11InformationElementID = 100 + Dot11InformationElementIDLinkIdentifier Dot11InformationElementID = 101 + Dot11InformationElementIDWakeupSchedule Dot11InformationElementID = 102 + 
Dot11InformationElementIDChannelSwitchTiming Dot11InformationElementID = 104 + Dot11InformationElementIDPTIControl Dot11InformationElementID = 105 + Dot11InformationElementIDPUBufferStatus Dot11InformationElementID = 106 + Dot11InformationElementIDInterworking Dot11InformationElementID = 107 + Dot11InformationElementIDAdvertisementProtocol Dot11InformationElementID = 108 + Dot11InformationElementIDExpBWRequest Dot11InformationElementID = 109 + Dot11InformationElementIDQOSMapSet Dot11InformationElementID = 110 + Dot11InformationElementIDRoamingConsortium Dot11InformationElementID = 111 + Dot11InformationElementIDEmergencyAlertIdentifier Dot11InformationElementID = 112 + Dot11InformationElementIDMeshConfiguration Dot11InformationElementID = 113 + Dot11InformationElementIDMeshID Dot11InformationElementID = 114 + Dot11InformationElementIDMeshLinkMetricReport Dot11InformationElementID = 115 + Dot11InformationElementIDCongestionNotification Dot11InformationElementID = 116 + Dot11InformationElementIDMeshPeeringManagement Dot11InformationElementID = 117 + Dot11InformationElementIDMeshChannelSwitchParam Dot11InformationElementID = 118 + Dot11InformationElementIDMeshAwakeWindows Dot11InformationElementID = 119 + Dot11InformationElementIDBeaconTiming Dot11InformationElementID = 120 + Dot11InformationElementIDMCCAOPSetupRequest Dot11InformationElementID = 121 + Dot11InformationElementIDMCCAOPSetupReply Dot11InformationElementID = 122 + Dot11InformationElementIDMCCAOPAdvertisement Dot11InformationElementID = 123 + Dot11InformationElementIDMCCAOPTeardown Dot11InformationElementID = 124 + Dot11InformationElementIDGateAnnouncement Dot11InformationElementID = 125 + Dot11InformationElementIDRootAnnouncement Dot11InformationElementID = 126 + Dot11InformationElementIDExtCapability Dot11InformationElementID = 127 + Dot11InformationElementIDAgereProprietary Dot11InformationElementID = 128 + Dot11InformationElementIDPathRequest Dot11InformationElementID = 130 + Dot11InformationElementIDPathReply Dot11InformationElementID = 131 + Dot11InformationElementIDPathError Dot11InformationElementID = 132 + Dot11InformationElementIDCiscoCCX1CKIPDeviceName Dot11InformationElementID = 133 + Dot11InformationElementIDCiscoCCX2 Dot11InformationElementID = 136 + Dot11InformationElementIDProxyUpdate Dot11InformationElementID = 137 + Dot11InformationElementIDProxyUpdateConfirmation Dot11InformationElementID = 138 + Dot11InformationElementIDAuthMeshPerringExch Dot11InformationElementID = 139 + Dot11InformationElementIDMIC Dot11InformationElementID = 140 + Dot11InformationElementIDDestinationURI Dot11InformationElementID = 141 + Dot11InformationElementIDUAPSDCoexistence Dot11InformationElementID = 142 + Dot11InformationElementIDWakeupSchedule80211ad Dot11InformationElementID = 143 + Dot11InformationElementIDExtendedSchedule Dot11InformationElementID = 144 + Dot11InformationElementIDSTAAvailability Dot11InformationElementID = 145 + Dot11InformationElementIDDMGTSPEC Dot11InformationElementID = 146 + Dot11InformationElementIDNextDMGATI Dot11InformationElementID = 147 + Dot11InformationElementIDDMSCapabilities Dot11InformationElementID = 148 + Dot11InformationElementIDCiscoUnknown95 Dot11InformationElementID = 149 + Dot11InformationElementIDVendor2 Dot11InformationElementID = 150 + Dot11InformationElementIDDMGOperating Dot11InformationElementID = 151 + Dot11InformationElementIDDMGBSSParamChange Dot11InformationElementID = 152 + Dot11InformationElementIDDMGBeamRefinement Dot11InformationElementID = 153 + 
Dot11InformationElementIDChannelMeasFeedback Dot11InformationElementID = 154 + Dot11InformationElementIDAwakeWindow Dot11InformationElementID = 157 + Dot11InformationElementIDMultiBand Dot11InformationElementID = 158 + Dot11InformationElementIDADDBAExtension Dot11InformationElementID = 159 + Dot11InformationElementIDNEXTPCPList Dot11InformationElementID = 160 + Dot11InformationElementIDPCPHandover Dot11InformationElementID = 161 + Dot11InformationElementIDDMGLinkMargin Dot11InformationElementID = 162 + Dot11InformationElementIDSwitchingStream Dot11InformationElementID = 163 + Dot11InformationElementIDSessionTransmission Dot11InformationElementID = 164 + Dot11InformationElementIDDynamicTonePairReport Dot11InformationElementID = 165 + Dot11InformationElementIDClusterReport Dot11InformationElementID = 166 + Dot11InformationElementIDRelayCapabilities Dot11InformationElementID = 167 + Dot11InformationElementIDRelayTransferParameter Dot11InformationElementID = 168 + Dot11InformationElementIDBeamlinkMaintenance Dot11InformationElementID = 169 + Dot11InformationElementIDMultipleMacSublayers Dot11InformationElementID = 170 + Dot11InformationElementIDUPID Dot11InformationElementID = 171 + Dot11InformationElementIDDMGLinkAdaptionAck Dot11InformationElementID = 172 + Dot11InformationElementIDSymbolProprietary Dot11InformationElementID = 173 + Dot11InformationElementIDMCCAOPAdvertOverview Dot11InformationElementID = 174 + Dot11InformationElementIDQuietPeriodRequest Dot11InformationElementID = 175 + Dot11InformationElementIDQuietPeriodResponse Dot11InformationElementID = 177 + Dot11InformationElementIDECPACPolicy Dot11InformationElementID = 182 + Dot11InformationElementIDClusterTimeOffset Dot11InformationElementID = 183 + Dot11InformationElementIDAntennaSectorID Dot11InformationElementID = 190 + Dot11InformationElementIDVHTCapabilities Dot11InformationElementID = 191 + Dot11InformationElementIDVHTOperation Dot11InformationElementID = 192 + Dot11InformationElementIDExtendedBSSLoad Dot11InformationElementID = 193 + Dot11InformationElementIDWideBWChannelSwitch Dot11InformationElementID = 194 + Dot11InformationElementIDVHTTxPowerEnvelope Dot11InformationElementID = 195 + Dot11InformationElementIDChannelSwitchWrapper Dot11InformationElementID = 196 + Dot11InformationElementIDOperatingModeNotification Dot11InformationElementID = 199 + Dot11InformationElementIDUPSIM Dot11InformationElementID = 200 + Dot11InformationElementIDReducedNeighborReport Dot11InformationElementID = 201 + Dot11InformationElementIDTVHTOperation Dot11InformationElementID = 202 + Dot11InformationElementIDDeviceLocation Dot11InformationElementID = 204 + Dot11InformationElementIDWhiteSpaceMap Dot11InformationElementID = 205 + Dot11InformationElementIDFineTuningMeasureParams Dot11InformationElementID = 206 + Dot11InformationElementIDVendor Dot11InformationElementID = 221 +) + +// String provides a human readable string for Dot11InformationElementID. +// This string is possibly subject to change over time; if you're storing this +// persistently, you should probably store the Dot11InformationElementID value, +// not its string. 
+func (a Dot11InformationElementID) String() string { + switch a { + case Dot11InformationElementIDSSID: + return "SSID parameter set" + case Dot11InformationElementIDRates: + return "Supported Rates" + case Dot11InformationElementIDFHSet: + return "FH Parameter set" + case Dot11InformationElementIDDSSet: + return "DS Parameter set" + case Dot11InformationElementIDCFSet: + return "CF Parameter set" + case Dot11InformationElementIDTIM: + return "Traffic Indication Map (TIM)" + case Dot11InformationElementIDIBSSSet: + return "IBSS Parameter set" + case Dot11InformationElementIDCountryInfo: + return "Country Information" + case Dot11InformationElementIDHoppingPatternParam: + return "Hopping Pattern Parameters" + case Dot11InformationElementIDHoppingPatternTable: + return "Hopping Pattern Table" + case Dot11InformationElementIDRequest: + return "Request" + case Dot11InformationElementIDQBSSLoadElem: + return "QBSS Load Element" + case Dot11InformationElementIDEDCAParamSet: + return "EDCA Parameter Set" + case Dot11InformationElementIDTrafficSpec: + return "Traffic Specification" + case Dot11InformationElementIDTrafficClass: + return "Traffic Classification" + case Dot11InformationElementIDSchedule: + return "Schedule" + case Dot11InformationElementIDChallenge: + return "Challenge text" + case Dot11InformationElementIDPowerConst: + return "Power Constraint" + case Dot11InformationElementIDPowerCapability: + return "Power Capability" + case Dot11InformationElementIDTPCRequest: + return "TPC Request" + case Dot11InformationElementIDTPCReport: + return "TPC Report" + case Dot11InformationElementIDSupportedChannels: + return "Supported Channels" + case Dot11InformationElementIDSwitchChannelAnnounce: + return "Channel Switch Announcement" + case Dot11InformationElementIDMeasureRequest: + return "Measurement Request" + case Dot11InformationElementIDMeasureReport: + return "Measurement Report" + case Dot11InformationElementIDQuiet: + return "Quiet" + case Dot11InformationElementIDIBSSDFS: + return "IBSS DFS" + case Dot11InformationElementIDERPInfo: + return "ERP Information" + case Dot11InformationElementIDTSDelay: + return "TS Delay" + case Dot11InformationElementIDTCLASProcessing: + return "TCLAS Processing" + case Dot11InformationElementIDHTCapabilities: + return "HT Capabilities (802.11n D1.10)" + case Dot11InformationElementIDQOSCapability: + return "QOS Capability" + case Dot11InformationElementIDERPInfo2: + return "ERP Information-2" + case Dot11InformationElementIDRSNInfo: + return "RSN Information" + case Dot11InformationElementIDESRates: + return "Extended Supported Rates" + case Dot11InformationElementIDAPChannelReport: + return "AP Channel Report" + case Dot11InformationElementIDNeighborReport: + return "Neighbor Report" + case Dot11InformationElementIDRCPI: + return "RCPI" + case Dot11InformationElementIDMobilityDomain: + return "Mobility Domain" + case Dot11InformationElementIDFastBSSTrans: + return "Fast BSS Transition" + case Dot11InformationElementIDTimeoutInt: + return "Timeout Interval" + case Dot11InformationElementIDRICData: + return "RIC Data" + case Dot11InformationElementIDDSERegisteredLoc: + return "DSE Registered Location" + case Dot11InformationElementIDSuppOperatingClass: + return "Supported Operating Classes" + case Dot11InformationElementIDExtChanSwitchAnnounce: + return "Extended Channel Switch Announcement" + case Dot11InformationElementIDHTInfo: + return "HT Information (802.11n D1.10)" + case Dot11InformationElementIDSecChanOffset: + return "Secondary Channel Offset 
(802.11n D1.10)" + case Dot11InformationElementIDBSSAverageAccessDelay: + return "BSS Average Access Delay" + case Dot11InformationElementIDAntenna: + return "Antenna" + case Dot11InformationElementIDRSNI: + return "RSNI" + case Dot11InformationElementIDMeasurePilotTrans: + return "Measurement Pilot Transmission" + case Dot11InformationElementIDBSSAvailAdmCapacity: + return "BSS Available Admission Capacity" + case Dot11InformationElementIDBSSACAccDelayWAPIParam: + return "BSS AC Access Delay/WAPI Parameter Set" + case Dot11InformationElementIDTimeAdvertisement: + return "Time Advertisement" + case Dot11InformationElementIDRMEnabledCapabilities: + return "RM Enabled Capabilities" + case Dot11InformationElementIDMultipleBSSID: + return "Multiple BSSID" + case Dot11InformationElementID2040BSSCoExist: + return "20/40 BSS Coexistence" + case Dot11InformationElementID2040BSSIntChanReport: + return "20/40 BSS Intolerant Channel Report" + case Dot11InformationElementIDOverlapBSSScanParam: + return "Overlapping BSS Scan Parameters" + case Dot11InformationElementIDRICDescriptor: + return "RIC Descriptor" + case Dot11InformationElementIDManagementMIC: + return "Management MIC" + case Dot11InformationElementIDEventRequest: + return "Event Request" + case Dot11InformationElementIDEventReport: + return "Event Report" + case Dot11InformationElementIDDiagnosticRequest: + return "Diagnostic Request" + case Dot11InformationElementIDDiagnosticReport: + return "Diagnostic Report" + case Dot11InformationElementIDLocationParam: + return "Location Parameters" + case Dot11InformationElementIDNonTransBSSIDCapability: + return "Non Transmitted BSSID Capability" + case Dot11InformationElementIDSSIDList: + return "SSID List" + case Dot11InformationElementIDMultipleBSSIDIndex: + return "Multiple BSSID Index" + case Dot11InformationElementIDFMSDescriptor: + return "FMS Descriptor" + case Dot11InformationElementIDFMSRequest: + return "FMS Request" + case Dot11InformationElementIDFMSResponse: + return "FMS Response" + case Dot11InformationElementIDQOSTrafficCapability: + return "QoS Traffic Capability" + case Dot11InformationElementIDBSSMaxIdlePeriod: + return "BSS Max Idle Period" + case Dot11InformationElementIDTFSRequest: + return "TFS Request" + case Dot11InformationElementIDTFSResponse: + return "TFS Response" + case Dot11InformationElementIDWNMSleepMode: + return "WNM-Sleep Mode" + case Dot11InformationElementIDTIMBroadcastRequest: + return "TIM Broadcast Request" + case Dot11InformationElementIDTIMBroadcastResponse: + return "TIM Broadcast Response" + case Dot11InformationElementIDCollInterferenceReport: + return "Collocated Interference Report" + case Dot11InformationElementIDChannelUsage: + return "Channel Usage" + case Dot11InformationElementIDTimeZone: + return "Time Zone" + case Dot11InformationElementIDDMSRequest: + return "DMS Request" + case Dot11InformationElementIDDMSResponse: + return "DMS Response" + case Dot11InformationElementIDLinkIdentifier: + return "Link Identifier" + case Dot11InformationElementIDWakeupSchedule: + return "Wakeup Schedule" + case Dot11InformationElementIDChannelSwitchTiming: + return "Channel Switch Timing" + case Dot11InformationElementIDPTIControl: + return "PTI Control" + case Dot11InformationElementIDPUBufferStatus: + return "PU Buffer Status" + case Dot11InformationElementIDInterworking: + return "Interworking" + case Dot11InformationElementIDAdvertisementProtocol: + return "Advertisement Protocol" + case Dot11InformationElementIDExpBWRequest: + return "Expedited Bandwidth 
Request" + case Dot11InformationElementIDQOSMapSet: + return "QoS Map Set" + case Dot11InformationElementIDRoamingConsortium: + return "Roaming Consortium" + case Dot11InformationElementIDEmergencyAlertIdentifier: + return "Emergency Alert Identifier" + case Dot11InformationElementIDMeshConfiguration: + return "Mesh Configuration" + case Dot11InformationElementIDMeshID: + return "Mesh ID" + case Dot11InformationElementIDMeshLinkMetricReport: + return "Mesh Link Metric Report" + case Dot11InformationElementIDCongestionNotification: + return "Congestion Notification" + case Dot11InformationElementIDMeshPeeringManagement: + return "Mesh Peering Management" + case Dot11InformationElementIDMeshChannelSwitchParam: + return "Mesh Channel Switch Parameters" + case Dot11InformationElementIDMeshAwakeWindows: + return "Mesh Awake Windows" + case Dot11InformationElementIDBeaconTiming: + return "Beacon Timing" + case Dot11InformationElementIDMCCAOPSetupRequest: + return "MCCAOP Setup Request" + case Dot11InformationElementIDMCCAOPSetupReply: + return "MCCAOP SETUP Reply" + case Dot11InformationElementIDMCCAOPAdvertisement: + return "MCCAOP Advertisement" + case Dot11InformationElementIDMCCAOPTeardown: + return "MCCAOP Teardown" + case Dot11InformationElementIDGateAnnouncement: + return "Gate Announcement" + case Dot11InformationElementIDRootAnnouncement: + return "Root Announcement" + case Dot11InformationElementIDExtCapability: + return "Extended Capabilities" + case Dot11InformationElementIDAgereProprietary: + return "Agere Proprietary" + case Dot11InformationElementIDPathRequest: + return "Path Request" + case Dot11InformationElementIDPathReply: + return "Path Reply" + case Dot11InformationElementIDPathError: + return "Path Error" + case Dot11InformationElementIDCiscoCCX1CKIPDeviceName: + return "Cisco CCX1 CKIP + Device Name" + case Dot11InformationElementIDCiscoCCX2: + return "Cisco CCX2" + case Dot11InformationElementIDProxyUpdate: + return "Proxy Update" + case Dot11InformationElementIDProxyUpdateConfirmation: + return "Proxy Update Confirmation" + case Dot11InformationElementIDAuthMeshPerringExch: + return "Auhenticated Mesh Perring Exchange" + case Dot11InformationElementIDMIC: + return "MIC (Message Integrity Code)" + case Dot11InformationElementIDDestinationURI: + return "Destination URI" + case Dot11InformationElementIDUAPSDCoexistence: + return "U-APSD Coexistence" + case Dot11InformationElementIDWakeupSchedule80211ad: + return "Wakeup Schedule 802.11ad" + case Dot11InformationElementIDExtendedSchedule: + return "Extended Schedule" + case Dot11InformationElementIDSTAAvailability: + return "STA Availability" + case Dot11InformationElementIDDMGTSPEC: + return "DMG TSPEC" + case Dot11InformationElementIDNextDMGATI: + return "Next DMG ATI" + case Dot11InformationElementIDDMSCapabilities: + return "DMG Capabilities" + case Dot11InformationElementIDCiscoUnknown95: + return "Cisco Unknown 95" + case Dot11InformationElementIDVendor2: + return "Vendor Specific" + case Dot11InformationElementIDDMGOperating: + return "DMG Operating" + case Dot11InformationElementIDDMGBSSParamChange: + return "DMG BSS Parameter Change" + case Dot11InformationElementIDDMGBeamRefinement: + return "DMG Beam Refinement" + case Dot11InformationElementIDChannelMeasFeedback: + return "Channel Measurement Feedback" + case Dot11InformationElementIDAwakeWindow: + return "Awake Window" + case Dot11InformationElementIDMultiBand: + return "Multi Band" + case Dot11InformationElementIDADDBAExtension: + return "ADDBA Extension" + 
case Dot11InformationElementIDNEXTPCPList: + return "NEXTPCP List" + case Dot11InformationElementIDPCPHandover: + return "PCP Handover" + case Dot11InformationElementIDDMGLinkMargin: + return "DMG Link Margin" + case Dot11InformationElementIDSwitchingStream: + return "Switching Stream" + case Dot11InformationElementIDSessionTransmission: + return "Session Transmission" + case Dot11InformationElementIDDynamicTonePairReport: + return "Dynamic Tone Pairing Report" + case Dot11InformationElementIDClusterReport: + return "Cluster Report" + case Dot11InformationElementIDRelayCapabilities: + return "Relay Capabilities" + case Dot11InformationElementIDRelayTransferParameter: + return "Relay Transfer Parameter" + case Dot11InformationElementIDBeamlinkMaintenance: + return "Beamlink Maintenance" + case Dot11InformationElementIDMultipleMacSublayers: + return "Multiple MAC Sublayers" + case Dot11InformationElementIDUPID: + return "U-PID" + case Dot11InformationElementIDDMGLinkAdaptionAck: + return "DMG Link Adaption Acknowledgment" + case Dot11InformationElementIDSymbolProprietary: + return "Symbol Proprietary" + case Dot11InformationElementIDMCCAOPAdvertOverview: + return "MCCAOP Advertisement Overview" + case Dot11InformationElementIDQuietPeriodRequest: + return "Quiet Period Request" + case Dot11InformationElementIDQuietPeriodResponse: + return "Quiet Period Response" + case Dot11InformationElementIDECPACPolicy: + return "ECPAC Policy" + case Dot11InformationElementIDClusterTimeOffset: + return "Cluster Time Offset" + case Dot11InformationElementIDAntennaSectorID: + return "Antenna Sector ID" + case Dot11InformationElementIDVHTCapabilities: + return "VHT Capabilities (IEEE Std 802.11ac/D3.1)" + case Dot11InformationElementIDVHTOperation: + return "VHT Operation (IEEE Std 802.11ac/D3.1)" + case Dot11InformationElementIDExtendedBSSLoad: + return "Extended BSS Load" + case Dot11InformationElementIDWideBWChannelSwitch: + return "Wide Bandwidth Channel Switch" + case Dot11InformationElementIDVHTTxPowerEnvelope: + return "VHT Tx Power Envelope (IEEE Std 802.11ac/D5.0)" + case Dot11InformationElementIDChannelSwitchWrapper: + return "Channel Switch Wrapper" + case Dot11InformationElementIDOperatingModeNotification: + return "Operating Mode Notification" + case Dot11InformationElementIDUPSIM: + return "UP SIM" + case Dot11InformationElementIDReducedNeighborReport: + return "Reduced Neighbor Report" + case Dot11InformationElementIDTVHTOperation: + return "TVHT Op" + case Dot11InformationElementIDDeviceLocation: + return "Device Location" + case Dot11InformationElementIDWhiteSpaceMap: + return "White Space Map" + case Dot11InformationElementIDFineTuningMeasureParams: + return "Fine Tuning Measure Parameters" + case Dot11InformationElementIDVendor: + return "Vendor" + default: + return "Unknown information element id" + } +} + +// Dot11 provides an IEEE 802.11 base packet header. +// See http://standards.ieee.org/findstds/standard/802.11-2012.html +// for excruciating detail. 
+type Dot11 struct { + BaseLayer + Type Dot11Type + Proto uint8 + Flags Dot11Flags + DurationID uint16 + Address1 net.HardwareAddr + Address2 net.HardwareAddr + Address3 net.HardwareAddr + Address4 net.HardwareAddr + SequenceNumber uint16 + FragmentNumber uint16 + Checksum uint32 + QOS *Dot11QOS + HTControl *Dot11HTControl + DataLayer gopacket.Layer +} + +type Dot11QOS struct { + TID uint8 /* Traffic IDentifier */ + EOSP bool /* End of service period */ + AckPolicy Dot11AckPolicy + TXOP uint8 +} + +type Dot11HTControl struct { + ACConstraint bool + RDGMorePPDU bool + + VHT *Dot11HTControlVHT + HT *Dot11HTControlHT +} + +type Dot11HTControlHT struct { + LinkAdapationControl *Dot11LinkAdapationControl + CalibrationPosition uint8 + CalibrationSequence uint8 + CSISteering uint8 + NDPAnnouncement bool + DEI bool +} + +type Dot11HTControlVHT struct { + MRQ bool + UnsolicitedMFB bool + MSI *uint8 + MFB Dot11HTControlMFB + CompressedMSI *uint8 + STBCIndication bool + MFSI *uint8 + GID *uint8 + CodingType *Dot11CodingType + FbTXBeamformed bool +} + +type Dot11HTControlMFB struct { + NumSTS uint8 + VHTMCS uint8 + BW uint8 + SNR int8 +} + +type Dot11LinkAdapationControl struct { + TRQ bool + MRQ bool + MSI uint8 + MFSI uint8 + ASEL *Dot11ASEL + MFB *uint8 +} + +type Dot11ASEL struct { + Command uint8 + Data uint8 +} + +type Dot11CodingType uint8 + +const ( + Dot11CodingTypeBCC = 0 + Dot11CodingTypeLDPC = 1 +) + +func (a Dot11CodingType) String() string { + switch a { + case Dot11CodingTypeBCC: + return "BCC" + case Dot11CodingTypeLDPC: + return "LDPC" + default: + return "Unknown coding type" + } +} + +func (m *Dot11HTControlMFB) NoFeedBackPresent() bool { + return m.VHTMCS == 15 && m.NumSTS == 7 +} + +func decodeDot11(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11{} + err := d.DecodeFromBytes(data, p) + if err != nil { + return err + } + p.AddLayer(d) + if d.DataLayer != nil { + p.AddLayer(d.DataLayer) + } + return p.NextDecoder(d.NextLayerType()) +} + +func (m *Dot11) LayerType() gopacket.LayerType { return LayerTypeDot11 } +func (m *Dot11) CanDecode() gopacket.LayerClass { return LayerTypeDot11 } +func (m *Dot11) NextLayerType() gopacket.LayerType { + if m.DataLayer != nil { + if m.Flags.WEP() { + return LayerTypeDot11WEP + } + return m.DataLayer.(gopacket.DecodingLayer).NextLayerType() + } + return m.Type.LayerType() +} + +func createU8(x uint8) *uint8 { + return &x +} + +var dataDecodeMap = map[Dot11Type]func() gopacket.DecodingLayer{ + Dot11TypeData: func() gopacket.DecodingLayer { return &Dot11Data{} }, + Dot11TypeDataCFAck: func() gopacket.DecodingLayer { return &Dot11DataCFAck{} }, + Dot11TypeDataCFPoll: func() gopacket.DecodingLayer { return &Dot11DataCFPoll{} }, + Dot11TypeDataCFAckPoll: func() gopacket.DecodingLayer { return &Dot11DataCFAckPoll{} }, + Dot11TypeDataNull: func() gopacket.DecodingLayer { return &Dot11DataNull{} }, + Dot11TypeDataCFAckNoData: func() gopacket.DecodingLayer { return &Dot11DataCFAckNoData{} }, + Dot11TypeDataCFPollNoData: func() gopacket.DecodingLayer { return &Dot11DataCFPollNoData{} }, + Dot11TypeDataCFAckPollNoData: func() gopacket.DecodingLayer { return &Dot11DataCFAckPollNoData{} }, + Dot11TypeDataQOSData: func() gopacket.DecodingLayer { return &Dot11DataQOSData{} }, + Dot11TypeDataQOSDataCFAck: func() gopacket.DecodingLayer { return &Dot11DataQOSDataCFAck{} }, + Dot11TypeDataQOSDataCFPoll: func() gopacket.DecodingLayer { return &Dot11DataQOSDataCFPoll{} }, + Dot11TypeDataQOSDataCFAckPoll: func() gopacket.DecodingLayer { return 
&Dot11DataQOSDataCFAckPoll{} }, + Dot11TypeDataQOSNull: func() gopacket.DecodingLayer { return &Dot11DataQOSNull{} }, + Dot11TypeDataQOSCFPollNoData: func() gopacket.DecodingLayer { return &Dot11DataQOSCFPollNoData{} }, + Dot11TypeDataQOSCFAckPollNoData: func() gopacket.DecodingLayer { return &Dot11DataQOSCFAckPollNoData{} }, +} + +func (m *Dot11) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 10 { + df.SetTruncated() + return fmt.Errorf("Dot11 length %v too short, %v required", len(data), 10) + } + m.Type = Dot11Type((data[0])&0xFC) >> 2 + + m.DataLayer = nil + m.Proto = uint8(data[0]) & 0x0003 + m.Flags = Dot11Flags(data[1]) + m.DurationID = binary.LittleEndian.Uint16(data[2:4]) + m.Address1 = net.HardwareAddr(data[4:10]) + + offset := 10 + + mainType := m.Type.MainType() + + switch mainType { + case Dot11TypeCtrl: + switch m.Type { + case Dot11TypeCtrlRTS, Dot11TypeCtrlPowersavePoll, Dot11TypeCtrlCFEnd, Dot11TypeCtrlCFEndAck: + if len(data) < offset+6 { + df.SetTruncated() + return fmt.Errorf("Dot11 length %v too short, %v required", len(data), offset+6) + } + m.Address2 = net.HardwareAddr(data[offset : offset+6]) + offset += 6 + } + case Dot11TypeMgmt, Dot11TypeData: + if len(data) < offset+14 { + df.SetTruncated() + return fmt.Errorf("Dot11 length %v too short, %v required", len(data), offset+14) + } + m.Address2 = net.HardwareAddr(data[offset : offset+6]) + offset += 6 + m.Address3 = net.HardwareAddr(data[offset : offset+6]) + offset += 6 + + m.SequenceNumber = (binary.LittleEndian.Uint16(data[offset:offset+2]) & 0xFFF0) >> 4 + m.FragmentNumber = (binary.LittleEndian.Uint16(data[offset:offset+2]) & 0x000F) + offset += 2 + } + + if mainType == Dot11TypeData && m.Flags.FromDS() && m.Flags.ToDS() { + if len(data) < offset+6 { + df.SetTruncated() + return fmt.Errorf("Dot11 length %v too short, %v required", len(data), offset+6) + } + m.Address4 = net.HardwareAddr(data[offset : offset+6]) + offset += 6 + } + + if m.Type.QOS() { + if len(data) < offset+2 { + df.SetTruncated() + return fmt.Errorf("Dot11 length %v too short, %v required", len(data), offset+6) + } + m.QOS = &Dot11QOS{ + TID: (uint8(data[offset]) & 0x0F), + EOSP: (uint8(data[offset]) & 0x10) == 0x10, + AckPolicy: Dot11AckPolicy((uint8(data[offset]) & 0x60) >> 5), + TXOP: uint8(data[offset+1]), + } + offset += 2 + } + if m.Flags.Order() && (m.Type.QOS() || mainType == Dot11TypeMgmt) { + if len(data) < offset+4 { + df.SetTruncated() + return fmt.Errorf("Dot11 length %v too short, %v required", len(data), offset+6) + } + + htc := &Dot11HTControl{ + ACConstraint: data[offset+3]&0x40 != 0, + RDGMorePPDU: data[offset+3]&0x80 != 0, + } + m.HTControl = htc + + if data[offset]&0x1 != 0 { // VHT Variant + vht := &Dot11HTControlVHT{} + htc.VHT = vht + vht.MRQ = data[offset]&0x4 != 0 + vht.UnsolicitedMFB = data[offset+3]&0x20 != 0 + vht.MFB = Dot11HTControlMFB{ + NumSTS: uint8(data[offset+1] >> 1 & 0x7), + VHTMCS: uint8(data[offset+1] >> 4 & 0xF), + BW: uint8(data[offset+2] & 0x3), + SNR: int8((-(data[offset+2] >> 2 & 0x20))+data[offset+2]>>2&0x1F) + 22, + } + + if vht.UnsolicitedMFB { + if !vht.MFB.NoFeedBackPresent() { + vht.CompressedMSI = createU8(data[offset] >> 3 & 0x3) + vht.STBCIndication = data[offset]&0x20 != 0 + vht.CodingType = (*Dot11CodingType)(createU8(data[offset+3] >> 3 & 0x1)) + vht.FbTXBeamformed = data[offset+3]&0x10 != 0 + vht.GID = createU8( + data[offset]>>6 + + (data[offset+1] & 0x1 << 2) + + data[offset+3]&0x7<<3) + } + } else { + if vht.MRQ { + vht.MSI = createU8((data[offset] 
>> 3) & 0x07) + } + vht.MFSI = createU8(data[offset]>>6 + (data[offset+1] & 0x1 << 2)) + } + + } else { // HT Variant + ht := &Dot11HTControlHT{} + htc.HT = ht + + lac := &Dot11LinkAdapationControl{} + ht.LinkAdapationControl = lac + lac.TRQ = data[offset]&0x2 != 0 + lac.MFSI = data[offset]>>6&0x3 + data[offset+1]&0x1<<3 + if data[offset]&0x3C == 0x38 { // ASEL + lac.ASEL = &Dot11ASEL{ + Command: data[offset+1] >> 1 & 0x7, + Data: data[offset+1] >> 4 & 0xF, + } + } else { + lac.MRQ = data[offset]&0x4 != 0 + if lac.MRQ { + lac.MSI = data[offset] >> 3 & 0x7 + } + lac.MFB = createU8(data[offset+1] >> 1) + } + ht.CalibrationPosition = data[offset+2] & 0x3 + ht.CalibrationSequence = data[offset+2] >> 2 & 0x3 + ht.CSISteering = data[offset+2] >> 6 & 0x3 + ht.NDPAnnouncement = data[offset+3]&0x1 != 0 + if mainType != Dot11TypeMgmt { + ht.DEI = data[offset+3]&0x20 != 0 + } + } + + offset += 4 + } + + if len(data) < offset+4 { + df.SetTruncated() + return fmt.Errorf("Dot11 length %v too short, %v required", len(data), offset+4) + } + + m.BaseLayer = BaseLayer{ + Contents: data[0:offset], + Payload: data[offset : len(data)-4], + } + + if mainType == Dot11TypeData { + d := dataDecodeMap[m.Type] + if d == nil { + return fmt.Errorf("unsupported type: %v", m.Type) + } + l := d() + err := l.DecodeFromBytes(m.BaseLayer.Payload, df) + if err != nil { + return err + } + m.DataLayer = l.(gopacket.Layer) + } + + m.Checksum = binary.LittleEndian.Uint32(data[len(data)-4 : len(data)]) + return nil +} + +func (m *Dot11) ChecksumValid() bool { + // only for CTRL and MGMT frames + h := crc32.NewIEEE() + h.Write(m.Contents) + h.Write(m.Payload) + return m.Checksum == h.Sum32() +} + +func (m Dot11) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + buf, err := b.PrependBytes(24) + + if err != nil { + return err + } + + buf[0] = (uint8(m.Type) << 2) | m.Proto + buf[1] = uint8(m.Flags) + + binary.LittleEndian.PutUint16(buf[2:4], m.DurationID) + + copy(buf[4:10], m.Address1) + + offset := 10 + + switch m.Type.MainType() { + case Dot11TypeCtrl: + switch m.Type { + case Dot11TypeCtrlRTS, Dot11TypeCtrlPowersavePoll, Dot11TypeCtrlCFEnd, Dot11TypeCtrlCFEndAck: + copy(buf[offset:offset+6], m.Address2) + offset += 6 + } + case Dot11TypeMgmt, Dot11TypeData: + copy(buf[offset:offset+6], m.Address2) + offset += 6 + copy(buf[offset:offset+6], m.Address3) + offset += 6 + + binary.LittleEndian.PutUint16(buf[offset:offset+2], (m.SequenceNumber<<4)|m.FragmentNumber) + offset += 2 + } + + if m.Type.MainType() == Dot11TypeData && m.Flags.FromDS() && m.Flags.ToDS() { + copy(buf[offset:offset+6], m.Address4) + offset += 6 + } + + return nil +} + +// Dot11Mgmt is a base for all IEEE 802.11 management layers. +type Dot11Mgmt struct { + BaseLayer +} + +func (m *Dot11Mgmt) NextLayerType() gopacket.LayerType { return gopacket.LayerTypePayload } +func (m *Dot11Mgmt) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + m.Contents = data + return nil +} + +// Dot11Ctrl is a base for all IEEE 802.11 control layers. 
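// Illustrative usage sketch (editorial addition, not part of the vendored file):
// decoding a captured 802.11 frame with the Dot11 layer vendored above. Assumes
// an application importing "fmt", "github.com/google/gopacket" and
// "github.com/google/gopacket/layers"; frameBytes is assumed to hold a raw
// 802.11 frame including the trailing FCS (e.g. from a monitor-mode capture
// after any Radiotap header has been decoded or stripped).
func inspectDot11Frame(frameBytes []byte) {
	packet := gopacket.NewPacket(frameBytes, layers.LayerTypeDot11, gopacket.Default)
	if layer := packet.Layer(layers.LayerTypeDot11); layer != nil {
		dot11 := layer.(*layers.Dot11)
		// Address1 is the receiver address; Address2, when present, is the
		// transmitter. ChecksumValid recomputes the FCS over header+payload.
		fmt.Printf("type=%v to=%v from=%v fcsOK=%v\n",
			dot11.Type, dot11.Address1, dot11.Address2, dot11.ChecksumValid())
	}
}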
+type Dot11Ctrl struct { + BaseLayer +} + +func (m *Dot11Ctrl) NextLayerType() gopacket.LayerType { return gopacket.LayerTypePayload } + +func (m *Dot11Ctrl) LayerType() gopacket.LayerType { return LayerTypeDot11Ctrl } +func (m *Dot11Ctrl) CanDecode() gopacket.LayerClass { return LayerTypeDot11Ctrl } +func (m *Dot11Ctrl) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + m.Contents = data + return nil +} + +func decodeDot11Ctrl(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11Ctrl{} + return decodingLayerDecoder(d, data, p) +} + +// Dot11WEP contains WEP encrpted IEEE 802.11 data. +type Dot11WEP struct { + BaseLayer +} + +func (m *Dot11WEP) NextLayerType() gopacket.LayerType { return gopacket.LayerTypePayload } + +func (m *Dot11WEP) LayerType() gopacket.LayerType { return LayerTypeDot11WEP } +func (m *Dot11WEP) CanDecode() gopacket.LayerClass { return LayerTypeDot11WEP } +func (m *Dot11WEP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + m.Contents = data + return nil +} + +func decodeDot11WEP(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11WEP{} + return decodingLayerDecoder(d, data, p) +} + +// Dot11Data is a base for all IEEE 802.11 data layers. +type Dot11Data struct { + BaseLayer +} + +func (m *Dot11Data) NextLayerType() gopacket.LayerType { + return LayerTypeLLC +} + +func (m *Dot11Data) LayerType() gopacket.LayerType { return LayerTypeDot11Data } +func (m *Dot11Data) CanDecode() gopacket.LayerClass { return LayerTypeDot11Data } +func (m *Dot11Data) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + m.Payload = data + return nil +} + +func decodeDot11Data(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11Data{} + return decodingLayerDecoder(d, data, p) +} + +type Dot11DataCFAck struct { + Dot11Data +} + +func decodeDot11DataCFAck(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11DataCFAck{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11DataCFAck) LayerType() gopacket.LayerType { return LayerTypeDot11DataCFAck } +func (m *Dot11DataCFAck) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataCFAck } +func (m *Dot11DataCFAck) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + return m.Dot11Data.DecodeFromBytes(data, df) +} + +type Dot11DataCFPoll struct { + Dot11Data +} + +func decodeDot11DataCFPoll(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11DataCFPoll{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11DataCFPoll) LayerType() gopacket.LayerType { return LayerTypeDot11DataCFPoll } +func (m *Dot11DataCFPoll) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataCFPoll } +func (m *Dot11DataCFPoll) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + return m.Dot11Data.DecodeFromBytes(data, df) +} + +type Dot11DataCFAckPoll struct { + Dot11Data +} + +func decodeDot11DataCFAckPoll(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11DataCFAckPoll{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11DataCFAckPoll) LayerType() gopacket.LayerType { return LayerTypeDot11DataCFAckPoll } +func (m *Dot11DataCFAckPoll) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataCFAckPoll } +func (m *Dot11DataCFAckPoll) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + return m.Dot11Data.DecodeFromBytes(data, df) +} + +type Dot11DataNull struct { + Dot11Data +} + +func decodeDot11DataNull(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11DataNull{} + return decodingLayerDecoder(d, 
data, p) +} + +func (m *Dot11DataNull) LayerType() gopacket.LayerType { return LayerTypeDot11DataNull } +func (m *Dot11DataNull) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataNull } +func (m *Dot11DataNull) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + return m.Dot11Data.DecodeFromBytes(data, df) +} + +type Dot11DataCFAckNoData struct { + Dot11Data +} + +func decodeDot11DataCFAckNoData(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11DataCFAckNoData{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11DataCFAckNoData) LayerType() gopacket.LayerType { return LayerTypeDot11DataCFAckNoData } +func (m *Dot11DataCFAckNoData) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataCFAckNoData } +func (m *Dot11DataCFAckNoData) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + return m.Dot11Data.DecodeFromBytes(data, df) +} + +type Dot11DataCFPollNoData struct { + Dot11Data +} + +func decodeDot11DataCFPollNoData(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11DataCFPollNoData{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11DataCFPollNoData) LayerType() gopacket.LayerType { return LayerTypeDot11DataCFPollNoData } +func (m *Dot11DataCFPollNoData) CanDecode() gopacket.LayerClass { + return LayerTypeDot11DataCFPollNoData +} +func (m *Dot11DataCFPollNoData) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + return m.Dot11Data.DecodeFromBytes(data, df) +} + +type Dot11DataCFAckPollNoData struct { + Dot11Data +} + +func decodeDot11DataCFAckPollNoData(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11DataCFAckPollNoData{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11DataCFAckPollNoData) LayerType() gopacket.LayerType { + return LayerTypeDot11DataCFAckPollNoData +} +func (m *Dot11DataCFAckPollNoData) CanDecode() gopacket.LayerClass { + return LayerTypeDot11DataCFAckPollNoData +} +func (m *Dot11DataCFAckPollNoData) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + return m.Dot11Data.DecodeFromBytes(data, df) +} + +type Dot11DataQOS struct { + Dot11Ctrl +} + +func (m *Dot11DataQOS) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + m.BaseLayer = BaseLayer{Payload: data} + return nil +} + +type Dot11DataQOSData struct { + Dot11DataQOS +} + +func decodeDot11DataQOSData(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11DataQOSData{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11DataQOSData) LayerType() gopacket.LayerType { return LayerTypeDot11DataQOSData } +func (m *Dot11DataQOSData) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataQOSData } + +func (m *Dot11DataQOSData) NextLayerType() gopacket.LayerType { + return LayerTypeDot11Data +} + +type Dot11DataQOSDataCFAck struct { + Dot11DataQOS +} + +func decodeDot11DataQOSDataCFAck(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11DataQOSDataCFAck{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11DataQOSDataCFAck) LayerType() gopacket.LayerType { return LayerTypeDot11DataQOSDataCFAck } +func (m *Dot11DataQOSDataCFAck) CanDecode() gopacket.LayerClass { + return LayerTypeDot11DataQOSDataCFAck +} +func (m *Dot11DataQOSDataCFAck) NextLayerType() gopacket.LayerType { return LayerTypeDot11DataCFAck } + +type Dot11DataQOSDataCFPoll struct { + Dot11DataQOS +} + +func decodeDot11DataQOSDataCFPoll(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11DataQOSDataCFPoll{} + return decodingLayerDecoder(d, data, p) +} + +func (m 
*Dot11DataQOSDataCFPoll) LayerType() gopacket.LayerType { + return LayerTypeDot11DataQOSDataCFPoll +} +func (m *Dot11DataQOSDataCFPoll) CanDecode() gopacket.LayerClass { + return LayerTypeDot11DataQOSDataCFPoll +} +func (m *Dot11DataQOSDataCFPoll) NextLayerType() gopacket.LayerType { return LayerTypeDot11DataCFPoll } + +type Dot11DataQOSDataCFAckPoll struct { + Dot11DataQOS +} + +func decodeDot11DataQOSDataCFAckPoll(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11DataQOSDataCFAckPoll{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11DataQOSDataCFAckPoll) LayerType() gopacket.LayerType { + return LayerTypeDot11DataQOSDataCFAckPoll +} +func (m *Dot11DataQOSDataCFAckPoll) CanDecode() gopacket.LayerClass { + return LayerTypeDot11DataQOSDataCFAckPoll +} +func (m *Dot11DataQOSDataCFAckPoll) NextLayerType() gopacket.LayerType { + return LayerTypeDot11DataCFAckPoll +} + +type Dot11DataQOSNull struct { + Dot11DataQOS +} + +func decodeDot11DataQOSNull(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11DataQOSNull{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11DataQOSNull) LayerType() gopacket.LayerType { return LayerTypeDot11DataQOSNull } +func (m *Dot11DataQOSNull) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataQOSNull } +func (m *Dot11DataQOSNull) NextLayerType() gopacket.LayerType { return LayerTypeDot11DataNull } + +type Dot11DataQOSCFPollNoData struct { + Dot11DataQOS +} + +func decodeDot11DataQOSCFPollNoData(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11DataQOSCFPollNoData{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11DataQOSCFPollNoData) LayerType() gopacket.LayerType { + return LayerTypeDot11DataQOSCFPollNoData +} +func (m *Dot11DataQOSCFPollNoData) CanDecode() gopacket.LayerClass { + return LayerTypeDot11DataQOSCFPollNoData +} +func (m *Dot11DataQOSCFPollNoData) NextLayerType() gopacket.LayerType { + return LayerTypeDot11DataCFPollNoData +} + +type Dot11DataQOSCFAckPollNoData struct { + Dot11DataQOS +} + +func decodeDot11DataQOSCFAckPollNoData(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11DataQOSCFAckPollNoData{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11DataQOSCFAckPollNoData) LayerType() gopacket.LayerType { + return LayerTypeDot11DataQOSCFAckPollNoData +} +func (m *Dot11DataQOSCFAckPollNoData) CanDecode() gopacket.LayerClass { + return LayerTypeDot11DataQOSCFAckPollNoData +} +func (m *Dot11DataQOSCFAckPollNoData) NextLayerType() gopacket.LayerType { + return LayerTypeDot11DataCFAckPollNoData +} + +type Dot11InformationElement struct { + BaseLayer + ID Dot11InformationElementID + Length uint8 + OUI []byte + Info []byte +} + +func (m *Dot11InformationElement) LayerType() gopacket.LayerType { + return LayerTypeDot11InformationElement +} +func (m *Dot11InformationElement) CanDecode() gopacket.LayerClass { + return LayerTypeDot11InformationElement +} + +func (m *Dot11InformationElement) NextLayerType() gopacket.LayerType { + return LayerTypeDot11InformationElement +} + +func (m *Dot11InformationElement) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 2 { + df.SetTruncated() + return fmt.Errorf("Dot11InformationElement length %v too short, %v required", len(data), 2) + } + m.ID = Dot11InformationElementID(data[0]) + m.Length = data[1] + offset := int(2) + + if len(data) < offset+int(m.Length) { + df.SetTruncated() + return fmt.Errorf("Dot11InformationElement length %v too short, %v required", len(data), offset+int(m.Length)) + } + 
if len(data) < offset+4 { + df.SetTruncated() + return fmt.Errorf("vendor extension size < %d", offset+int(m.Length)) + } + if m.ID == 221 { + // Vendor extension + m.OUI = data[offset : offset+4] + m.Info = data[offset+4 : offset+int(m.Length)] + } else { + m.Info = data[offset : offset+int(m.Length)] + } + + offset += int(m.Length) + + m.BaseLayer = BaseLayer{Contents: data[:offset], Payload: data[offset:]} + return nil +} + +func (d *Dot11InformationElement) String() string { + if d.ID == 0 { + return fmt.Sprintf("802.11 Information Element (ID: %v, Length: %v, SSID: %v)", d.ID, d.Length, string(d.Info)) + } else if d.ID == 1 { + rates := "" + for i := 0; i < len(d.Info); i++ { + if d.Info[i]&0x80 == 0 { + rates += fmt.Sprintf("%.1f ", float32(d.Info[i])*0.5) + } else { + rates += fmt.Sprintf("%.1f* ", float32(d.Info[i]&0x7F)*0.5) + } + } + return fmt.Sprintf("802.11 Information Element (ID: %v, Length: %v, Rates: %s Mbit)", d.ID, d.Length, rates) + } else if d.ID == 221 { + return fmt.Sprintf("802.11 Information Element (ID: %v, Length: %v, OUI: %X, Info: %X)", d.ID, d.Length, d.OUI, d.Info) + } else { + return fmt.Sprintf("802.11 Information Element (ID: %v, Length: %v, Info: %X)", d.ID, d.Length, d.Info) + } +} + +func (m Dot11InformationElement) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + length := len(m.Info) + len(m.OUI) + if buf, err := b.PrependBytes(2 + length); err != nil { + return err + } else { + buf[0] = uint8(m.ID) + buf[1] = uint8(length) + copy(buf[2:], m.OUI) + copy(buf[2+len(m.OUI):], m.Info) + } + return nil +} + +func decodeDot11InformationElement(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11InformationElement{} + return decodingLayerDecoder(d, data, p) +} + +type Dot11CtrlCTS struct { + Dot11Ctrl +} + +func decodeDot11CtrlCTS(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11CtrlCTS{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11CtrlCTS) LayerType() gopacket.LayerType { + return LayerTypeDot11CtrlCTS +} +func (m *Dot11CtrlCTS) CanDecode() gopacket.LayerClass { + return LayerTypeDot11CtrlCTS +} +func (m *Dot11CtrlCTS) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + return m.Dot11Ctrl.DecodeFromBytes(data, df) +} + +type Dot11CtrlRTS struct { + Dot11Ctrl +} + +func decodeDot11CtrlRTS(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11CtrlRTS{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11CtrlRTS) LayerType() gopacket.LayerType { + return LayerTypeDot11CtrlRTS +} +func (m *Dot11CtrlRTS) CanDecode() gopacket.LayerClass { + return LayerTypeDot11CtrlRTS +} +func (m *Dot11CtrlRTS) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + return m.Dot11Ctrl.DecodeFromBytes(data, df) +} + +type Dot11CtrlBlockAckReq struct { + Dot11Ctrl +} + +func decodeDot11CtrlBlockAckReq(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11CtrlBlockAckReq{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11CtrlBlockAckReq) LayerType() gopacket.LayerType { + return LayerTypeDot11CtrlBlockAckReq +} +func (m *Dot11CtrlBlockAckReq) CanDecode() gopacket.LayerClass { + return LayerTypeDot11CtrlBlockAckReq +} +func (m *Dot11CtrlBlockAckReq) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + return m.Dot11Ctrl.DecodeFromBytes(data, df) +} + +type Dot11CtrlBlockAck struct { + Dot11Ctrl +} + +func decodeDot11CtrlBlockAck(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11CtrlBlockAck{} + return decodingLayerDecoder(d, data, 
p) +} + +func (m *Dot11CtrlBlockAck) LayerType() gopacket.LayerType { return LayerTypeDot11CtrlBlockAck } +func (m *Dot11CtrlBlockAck) CanDecode() gopacket.LayerClass { return LayerTypeDot11CtrlBlockAck } +func (m *Dot11CtrlBlockAck) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + return m.Dot11Ctrl.DecodeFromBytes(data, df) +} + +type Dot11CtrlPowersavePoll struct { + Dot11Ctrl +} + +func decodeDot11CtrlPowersavePoll(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11CtrlPowersavePoll{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11CtrlPowersavePoll) LayerType() gopacket.LayerType { + return LayerTypeDot11CtrlPowersavePoll +} +func (m *Dot11CtrlPowersavePoll) CanDecode() gopacket.LayerClass { + return LayerTypeDot11CtrlPowersavePoll +} +func (m *Dot11CtrlPowersavePoll) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + return m.Dot11Ctrl.DecodeFromBytes(data, df) +} + +type Dot11CtrlAck struct { + Dot11Ctrl +} + +func decodeDot11CtrlAck(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11CtrlAck{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11CtrlAck) LayerType() gopacket.LayerType { return LayerTypeDot11CtrlAck } +func (m *Dot11CtrlAck) CanDecode() gopacket.LayerClass { return LayerTypeDot11CtrlAck } +func (m *Dot11CtrlAck) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + return m.Dot11Ctrl.DecodeFromBytes(data, df) +} + +type Dot11CtrlCFEnd struct { + Dot11Ctrl +} + +func decodeDot11CtrlCFEnd(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11CtrlCFEnd{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11CtrlCFEnd) LayerType() gopacket.LayerType { + return LayerTypeDot11CtrlCFEnd +} +func (m *Dot11CtrlCFEnd) CanDecode() gopacket.LayerClass { + return LayerTypeDot11CtrlCFEnd +} +func (m *Dot11CtrlCFEnd) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + return m.Dot11Ctrl.DecodeFromBytes(data, df) +} + +type Dot11CtrlCFEndAck struct { + Dot11Ctrl +} + +func decodeDot11CtrlCFEndAck(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11CtrlCFEndAck{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11CtrlCFEndAck) LayerType() gopacket.LayerType { + return LayerTypeDot11CtrlCFEndAck +} +func (m *Dot11CtrlCFEndAck) CanDecode() gopacket.LayerClass { + return LayerTypeDot11CtrlCFEndAck +} +func (m *Dot11CtrlCFEndAck) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + return m.Dot11Ctrl.DecodeFromBytes(data, df) +} + +type Dot11MgmtAssociationReq struct { + Dot11Mgmt + CapabilityInfo uint16 + ListenInterval uint16 +} + +func decodeDot11MgmtAssociationReq(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11MgmtAssociationReq{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11MgmtAssociationReq) LayerType() gopacket.LayerType { + return LayerTypeDot11MgmtAssociationReq +} +func (m *Dot11MgmtAssociationReq) CanDecode() gopacket.LayerClass { + return LayerTypeDot11MgmtAssociationReq +} +func (m *Dot11MgmtAssociationReq) NextLayerType() gopacket.LayerType { + return LayerTypeDot11InformationElement +} +func (m *Dot11MgmtAssociationReq) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 4 { + df.SetTruncated() + return fmt.Errorf("Dot11MgmtAssociationReq length %v too short, %v required", len(data), 4) + } + m.CapabilityInfo = binary.LittleEndian.Uint16(data[0:2]) + m.ListenInterval = binary.LittleEndian.Uint16(data[2:4]) + m.Payload = data[4:] + return 
m.Dot11Mgmt.DecodeFromBytes(data, df) +} + +func (m Dot11MgmtAssociationReq) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + buf, err := b.PrependBytes(4) + + if err != nil { + return err + } + + binary.LittleEndian.PutUint16(buf[0:2], m.CapabilityInfo) + binary.LittleEndian.PutUint16(buf[2:4], m.ListenInterval) + + return nil +} + +type Dot11MgmtAssociationResp struct { + Dot11Mgmt + CapabilityInfo uint16 + Status Dot11Status + AID uint16 +} + +func decodeDot11MgmtAssociationResp(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11MgmtAssociationResp{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11MgmtAssociationResp) CanDecode() gopacket.LayerClass { + return LayerTypeDot11MgmtAssociationResp +} +func (m *Dot11MgmtAssociationResp) LayerType() gopacket.LayerType { + return LayerTypeDot11MgmtAssociationResp +} +func (m *Dot11MgmtAssociationResp) NextLayerType() gopacket.LayerType { + return LayerTypeDot11InformationElement +} +func (m *Dot11MgmtAssociationResp) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 6 { + df.SetTruncated() + return fmt.Errorf("Dot11MgmtAssociationResp length %v too short, %v required", len(data), 6) + } + m.CapabilityInfo = binary.LittleEndian.Uint16(data[0:2]) + m.Status = Dot11Status(binary.LittleEndian.Uint16(data[2:4])) + m.AID = binary.LittleEndian.Uint16(data[4:6]) + m.Payload = data[6:] + return m.Dot11Mgmt.DecodeFromBytes(data, df) +} + +func (m Dot11MgmtAssociationResp) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + buf, err := b.PrependBytes(6) + + if err != nil { + return err + } + + binary.LittleEndian.PutUint16(buf[0:2], m.CapabilityInfo) + binary.LittleEndian.PutUint16(buf[2:4], uint16(m.Status)) + binary.LittleEndian.PutUint16(buf[4:6], m.AID) + + return nil +} + +type Dot11MgmtReassociationReq struct { + Dot11Mgmt + CapabilityInfo uint16 + ListenInterval uint16 + CurrentApAddress net.HardwareAddr +} + +func decodeDot11MgmtReassociationReq(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11MgmtReassociationReq{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11MgmtReassociationReq) LayerType() gopacket.LayerType { + return LayerTypeDot11MgmtReassociationReq +} +func (m *Dot11MgmtReassociationReq) CanDecode() gopacket.LayerClass { + return LayerTypeDot11MgmtReassociationReq +} +func (m *Dot11MgmtReassociationReq) NextLayerType() gopacket.LayerType { + return LayerTypeDot11InformationElement +} +func (m *Dot11MgmtReassociationReq) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 10 { + df.SetTruncated() + return fmt.Errorf("Dot11MgmtReassociationReq length %v too short, %v required", len(data), 10) + } + m.CapabilityInfo = binary.LittleEndian.Uint16(data[0:2]) + m.ListenInterval = binary.LittleEndian.Uint16(data[2:4]) + m.CurrentApAddress = net.HardwareAddr(data[4:10]) + m.Payload = data[10:] + return m.Dot11Mgmt.DecodeFromBytes(data, df) +} + +func (m Dot11MgmtReassociationReq) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + buf, err := b.PrependBytes(10) + + if err != nil { + return err + } + + binary.LittleEndian.PutUint16(buf[0:2], m.CapabilityInfo) + binary.LittleEndian.PutUint16(buf[2:4], m.ListenInterval) + + copy(buf[4:10], m.CurrentApAddress) + + return nil +} + +type Dot11MgmtReassociationResp struct { + Dot11Mgmt +} + +func decodeDot11MgmtReassociationResp(data []byte, p gopacket.PacketBuilder) error { + d := 
&Dot11MgmtReassociationResp{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11MgmtReassociationResp) LayerType() gopacket.LayerType { + return LayerTypeDot11MgmtReassociationResp +} +func (m *Dot11MgmtReassociationResp) CanDecode() gopacket.LayerClass { + return LayerTypeDot11MgmtReassociationResp +} +func (m *Dot11MgmtReassociationResp) NextLayerType() gopacket.LayerType { + return LayerTypeDot11InformationElement +} + +type Dot11MgmtProbeReq struct { + Dot11Mgmt +} + +func decodeDot11MgmtProbeReq(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11MgmtProbeReq{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11MgmtProbeReq) LayerType() gopacket.LayerType { return LayerTypeDot11MgmtProbeReq } +func (m *Dot11MgmtProbeReq) CanDecode() gopacket.LayerClass { return LayerTypeDot11MgmtProbeReq } +func (m *Dot11MgmtProbeReq) NextLayerType() gopacket.LayerType { + return LayerTypeDot11InformationElement +} + +type Dot11MgmtProbeResp struct { + Dot11Mgmt + Timestamp uint64 + Interval uint16 + Flags uint16 +} + +func decodeDot11MgmtProbeResp(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11MgmtProbeResp{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11MgmtProbeResp) LayerType() gopacket.LayerType { return LayerTypeDot11MgmtProbeResp } +func (m *Dot11MgmtProbeResp) CanDecode() gopacket.LayerClass { return LayerTypeDot11MgmtProbeResp } +func (m *Dot11MgmtProbeResp) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 12 { + df.SetTruncated() + + return fmt.Errorf("Dot11MgmtProbeResp length %v too short, %v required", len(data), 12) + } + + m.Timestamp = binary.LittleEndian.Uint64(data[0:8]) + m.Interval = binary.LittleEndian.Uint16(data[8:10]) + m.Flags = binary.LittleEndian.Uint16(data[10:12]) + m.Payload = data[12:] + + return m.Dot11Mgmt.DecodeFromBytes(data, df) +} + +func (m *Dot11MgmtProbeResp) NextLayerType() gopacket.LayerType { + return LayerTypeDot11InformationElement +} + +func (m Dot11MgmtProbeResp) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + buf, err := b.PrependBytes(12) + + if err != nil { + return err + } + + binary.LittleEndian.PutUint64(buf[0:8], m.Timestamp) + binary.LittleEndian.PutUint16(buf[8:10], m.Interval) + binary.LittleEndian.PutUint16(buf[10:12], m.Flags) + + return nil +} + +type Dot11MgmtMeasurementPilot struct { + Dot11Mgmt +} + +func decodeDot11MgmtMeasurementPilot(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11MgmtMeasurementPilot{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11MgmtMeasurementPilot) LayerType() gopacket.LayerType { + return LayerTypeDot11MgmtMeasurementPilot +} +func (m *Dot11MgmtMeasurementPilot) CanDecode() gopacket.LayerClass { + return LayerTypeDot11MgmtMeasurementPilot +} + +type Dot11MgmtBeacon struct { + Dot11Mgmt + Timestamp uint64 + Interval uint16 + Flags uint16 +} + +func decodeDot11MgmtBeacon(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11MgmtBeacon{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11MgmtBeacon) LayerType() gopacket.LayerType { return LayerTypeDot11MgmtBeacon } +func (m *Dot11MgmtBeacon) CanDecode() gopacket.LayerClass { return LayerTypeDot11MgmtBeacon } +func (m *Dot11MgmtBeacon) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 12 { + df.SetTruncated() + return fmt.Errorf("Dot11MgmtBeacon length %v too short, %v required", len(data), 12) + } + m.Timestamp = binary.LittleEndian.Uint64(data[0:8]) + m.Interval = 
binary.LittleEndian.Uint16(data[8:10]) + m.Flags = binary.LittleEndian.Uint16(data[10:12]) + m.Payload = data[12:] + return m.Dot11Mgmt.DecodeFromBytes(data, df) +} + +func (m *Dot11MgmtBeacon) NextLayerType() gopacket.LayerType { return LayerTypeDot11InformationElement } + +func (m Dot11MgmtBeacon) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + buf, err := b.PrependBytes(12) + + if err != nil { + return err + } + + binary.LittleEndian.PutUint64(buf[0:8], m.Timestamp) + binary.LittleEndian.PutUint16(buf[8:10], m.Interval) + binary.LittleEndian.PutUint16(buf[10:12], m.Flags) + + return nil +} + +type Dot11MgmtATIM struct { + Dot11Mgmt +} + +func decodeDot11MgmtATIM(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11MgmtATIM{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11MgmtATIM) LayerType() gopacket.LayerType { return LayerTypeDot11MgmtATIM } +func (m *Dot11MgmtATIM) CanDecode() gopacket.LayerClass { return LayerTypeDot11MgmtATIM } + +type Dot11MgmtDisassociation struct { + Dot11Mgmt + Reason Dot11Reason +} + +func decodeDot11MgmtDisassociation(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11MgmtDisassociation{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11MgmtDisassociation) LayerType() gopacket.LayerType { + return LayerTypeDot11MgmtDisassociation +} +func (m *Dot11MgmtDisassociation) CanDecode() gopacket.LayerClass { + return LayerTypeDot11MgmtDisassociation +} +func (m *Dot11MgmtDisassociation) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 2 { + df.SetTruncated() + return fmt.Errorf("Dot11MgmtDisassociation length %v too short, %v required", len(data), 2) + } + m.Reason = Dot11Reason(binary.LittleEndian.Uint16(data[0:2])) + return m.Dot11Mgmt.DecodeFromBytes(data, df) +} + +func (m Dot11MgmtDisassociation) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + buf, err := b.PrependBytes(2) + + if err != nil { + return err + } + + binary.LittleEndian.PutUint16(buf[0:2], uint16(m.Reason)) + + return nil +} + +type Dot11MgmtAuthentication struct { + Dot11Mgmt + Algorithm Dot11Algorithm + Sequence uint16 + Status Dot11Status +} + +func decodeDot11MgmtAuthentication(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11MgmtAuthentication{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11MgmtAuthentication) LayerType() gopacket.LayerType { + return LayerTypeDot11MgmtAuthentication +} +func (m *Dot11MgmtAuthentication) CanDecode() gopacket.LayerClass { + return LayerTypeDot11MgmtAuthentication +} +func (m *Dot11MgmtAuthentication) NextLayerType() gopacket.LayerType { + return LayerTypeDot11InformationElement +} +func (m *Dot11MgmtAuthentication) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 6 { + df.SetTruncated() + return fmt.Errorf("Dot11MgmtAuthentication length %v too short, %v required", len(data), 6) + } + m.Algorithm = Dot11Algorithm(binary.LittleEndian.Uint16(data[0:2])) + m.Sequence = binary.LittleEndian.Uint16(data[2:4]) + m.Status = Dot11Status(binary.LittleEndian.Uint16(data[4:6])) + m.Payload = data[6:] + return m.Dot11Mgmt.DecodeFromBytes(data, df) +} + +func (m Dot11MgmtAuthentication) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + buf, err := b.PrependBytes(6) + + if err != nil { + return err + } + + binary.LittleEndian.PutUint16(buf[0:2], uint16(m.Algorithm)) + binary.LittleEndian.PutUint16(buf[2:4], m.Sequence) + 
binary.LittleEndian.PutUint16(buf[4:6], uint16(m.Status)) + + return nil +} + +type Dot11MgmtDeauthentication struct { + Dot11Mgmt + Reason Dot11Reason +} + +func decodeDot11MgmtDeauthentication(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11MgmtDeauthentication{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11MgmtDeauthentication) LayerType() gopacket.LayerType { + return LayerTypeDot11MgmtDeauthentication +} +func (m *Dot11MgmtDeauthentication) CanDecode() gopacket.LayerClass { + return LayerTypeDot11MgmtDeauthentication +} +func (m *Dot11MgmtDeauthentication) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 2 { + df.SetTruncated() + return fmt.Errorf("Dot11MgmtDeauthentication length %v too short, %v required", len(data), 2) + } + m.Reason = Dot11Reason(binary.LittleEndian.Uint16(data[0:2])) + return m.Dot11Mgmt.DecodeFromBytes(data, df) +} + +func (m Dot11MgmtDeauthentication) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + buf, err := b.PrependBytes(2) + + if err != nil { + return err + } + + binary.LittleEndian.PutUint16(buf[0:2], uint16(m.Reason)) + + return nil +} + +type Dot11MgmtAction struct { + Dot11Mgmt +} + +func decodeDot11MgmtAction(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11MgmtAction{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11MgmtAction) LayerType() gopacket.LayerType { return LayerTypeDot11MgmtAction } +func (m *Dot11MgmtAction) CanDecode() gopacket.LayerClass { return LayerTypeDot11MgmtAction } + +type Dot11MgmtActionNoAck struct { + Dot11Mgmt +} + +func decodeDot11MgmtActionNoAck(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11MgmtActionNoAck{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11MgmtActionNoAck) LayerType() gopacket.LayerType { return LayerTypeDot11MgmtActionNoAck } +func (m *Dot11MgmtActionNoAck) CanDecode() gopacket.LayerClass { return LayerTypeDot11MgmtActionNoAck } + +type Dot11MgmtArubaWLAN struct { + Dot11Mgmt +} + +func decodeDot11MgmtArubaWLAN(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11MgmtArubaWLAN{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11MgmtArubaWLAN) LayerType() gopacket.LayerType { return LayerTypeDot11MgmtArubaWLAN } +func (m *Dot11MgmtArubaWLAN) CanDecode() gopacket.LayerClass { return LayerTypeDot11MgmtArubaWLAN } diff --git a/vendor/github.com/google/gopacket/layers/dot1q.go b/vendor/github.com/google/gopacket/layers/dot1q.go new file mode 100644 index 0000000000..5cdd2f8d68 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/dot1q.go @@ -0,0 +1,75 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// Copyright 2009-2011 Andreas Krennmair. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "fmt" + "github.com/google/gopacket" +) + +// Dot1Q is the packet layer for 802.1Q VLAN headers. +type Dot1Q struct { + BaseLayer + Priority uint8 + DropEligible bool + VLANIdentifier uint16 + Type EthernetType +} + +// LayerType returns gopacket.LayerTypeDot1Q +func (d *Dot1Q) LayerType() gopacket.LayerType { return LayerTypeDot1Q } + +// DecodeFromBytes decodes the given bytes into this layer. 
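// Illustrative sketch (editorial addition, not part of the vendored files):
// management frames such as beacons, decoded by the dot11.go layers above,
// carry a chain of Dot11InformationElement layers after their fixed fields,
// so the SSID can be recovered by walking the decoded layers. Assumes imports
// of "github.com/google/gopacket" and "github.com/google/gopacket/layers";
// frameBytes is assumed to be a raw 802.11 beacon frame including the FCS.
func beaconSSID(frameBytes []byte) (string, bool) {
	packet := gopacket.NewPacket(frameBytes, layers.LayerTypeDot11, gopacket.Default)
	if packet.Layer(layers.LayerTypeDot11MgmtBeacon) == nil {
		return "", false // not a beacon frame
	}
	for _, layer := range packet.Layers() {
		if ie, ok := layer.(*layers.Dot11InformationElement); ok &&
			ie.ID == layers.Dot11InformationElementIDSSID {
			return string(ie.Info), true
		}
	}
	return "", false
}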
+func (d *Dot1Q) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 4 { + df.SetTruncated() + return fmt.Errorf("802.1Q tag length %d too short", len(data)) + } + d.Priority = (data[0] & 0xE0) >> 5 + d.DropEligible = data[0]&0x10 != 0 + d.VLANIdentifier = binary.BigEndian.Uint16(data[:2]) & 0x0FFF + d.Type = EthernetType(binary.BigEndian.Uint16(data[2:4])) + d.BaseLayer = BaseLayer{Contents: data[:4], Payload: data[4:]} + return nil +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (d *Dot1Q) CanDecode() gopacket.LayerClass { + return LayerTypeDot1Q +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (d *Dot1Q) NextLayerType() gopacket.LayerType { + return d.Type.LayerType() +} + +func decodeDot1Q(data []byte, p gopacket.PacketBuilder) error { + d := &Dot1Q{} + return decodingLayerDecoder(d, data, p) +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (d *Dot1Q) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + bytes, err := b.PrependBytes(4) + if err != nil { + return err + } + if d.VLANIdentifier > 0xFFF { + return fmt.Errorf("vlan identifier %v is too high", d.VLANIdentifier) + } + firstBytes := uint16(d.Priority)<<13 | d.VLANIdentifier + if d.DropEligible { + firstBytes |= 0x1000 + } + binary.BigEndian.PutUint16(bytes, firstBytes) + binary.BigEndian.PutUint16(bytes[2:], uint16(d.Type)) + return nil +} diff --git a/vendor/github.com/google/gopacket/layers/eap.go b/vendor/github.com/google/gopacket/layers/eap.go new file mode 100644 index 0000000000..54238e8c73 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/eap.go @@ -0,0 +1,114 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "fmt" + "github.com/google/gopacket" +) + +type EAPCode uint8 +type EAPType uint8 + +const ( + EAPCodeRequest EAPCode = 1 + EAPCodeResponse EAPCode = 2 + EAPCodeSuccess EAPCode = 3 + EAPCodeFailure EAPCode = 4 + + // EAPTypeNone means that this EAP layer has no Type or TypeData. + // Success and Failure EAPs will have this set. + EAPTypeNone EAPType = 0 + + EAPTypeIdentity EAPType = 1 + EAPTypeNotification EAPType = 2 + EAPTypeNACK EAPType = 3 + EAPTypeOTP EAPType = 4 + EAPTypeTokenCard EAPType = 5 +) + +// EAP defines an Extensible Authentication Protocol (rfc 3748) layer. +type EAP struct { + BaseLayer + Code EAPCode + Id uint8 + Length uint16 + Type EAPType + TypeData []byte +} + +// LayerType returns LayerTypeEAP. +func (e *EAP) LayerType() gopacket.LayerType { return LayerTypeEAP } + +// DecodeFromBytes decodes the given bytes into this layer. 
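// Illustrative sketch (editorial addition, not part of the vendored files):
// building an Ethernet frame carrying an 802.1Q VLAN tag with the Dot1Q layer
// above. Assumes imports of "net", "github.com/google/gopacket" and
// "github.com/google/gopacket/layers"; the MAC addresses, priority and VLAN ID
// below are placeholders.
func buildTaggedFrame() ([]byte, error) {
	eth := &layers.Ethernet{
		SrcMAC:       net.HardwareAddr{0x02, 0x00, 0x00, 0x00, 0x00, 0x01},
		DstMAC:       net.HardwareAddr{0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
		EthernetType: layers.EthernetTypeDot1Q, // outer type announces the VLAN tag
	}
	vlan := &layers.Dot1Q{
		Priority:       3,
		VLANIdentifier: 100,
		Type:           layers.EthernetTypeIPv4, // inner type of the tagged payload
	}
	buf := gopacket.NewSerializeBuffer()
	err := gopacket.SerializeLayers(buf, gopacket.SerializeOptions{},
		eth, vlan, gopacket.Payload([]byte("payload")))
	return buf.Bytes(), err
}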
+func (e *EAP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 4 { + df.SetTruncated() + return fmt.Errorf("EAP length %d too short", len(data)) + } + e.Code = EAPCode(data[0]) + e.Id = data[1] + e.Length = binary.BigEndian.Uint16(data[2:4]) + if len(data) < int(e.Length) { + df.SetTruncated() + return fmt.Errorf("EAP length %d too short, %d expected", len(data), e.Length) + } + switch { + case e.Length > 4: + e.Type = EAPType(data[4]) + e.TypeData = data[5:] + case e.Length == 4: + e.Type = 0 + e.TypeData = nil + default: + return fmt.Errorf("invalid EAP length %d", e.Length) + } + e.BaseLayer.Contents = data[:e.Length] + e.BaseLayer.Payload = data[e.Length:] // Should be 0 bytes + return nil +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (e *EAP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + if opts.FixLengths { + e.Length = uint16(len(e.TypeData) + 1) + } + size := len(e.TypeData) + 4 + if size > 4 { + size++ + } + bytes, err := b.PrependBytes(size) + if err != nil { + return err + } + bytes[0] = byte(e.Code) + bytes[1] = e.Id + binary.BigEndian.PutUint16(bytes[2:], e.Length) + if size > 4 { + bytes[4] = byte(e.Type) + copy(bytes[5:], e.TypeData) + } + return nil +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (e *EAP) CanDecode() gopacket.LayerClass { + return LayerTypeEAP +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (e *EAP) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypeZero +} + +func decodeEAP(data []byte, p gopacket.PacketBuilder) error { + e := &EAP{} + return decodingLayerDecoder(e, data, p) +} diff --git a/vendor/github.com/google/gopacket/layers/eapol.go b/vendor/github.com/google/gopacket/layers/eapol.go new file mode 100644 index 0000000000..902598a206 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/eapol.go @@ -0,0 +1,302 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "fmt" + "github.com/google/gopacket" +) + +// EAPOL defines an EAP over LAN (802.1x) layer. +type EAPOL struct { + BaseLayer + Version uint8 + Type EAPOLType + Length uint16 +} + +// LayerType returns LayerTypeEAPOL. +func (e *EAPOL) LayerType() gopacket.LayerType { return LayerTypeEAPOL } + +// DecodeFromBytes decodes the given bytes into this layer. +func (e *EAPOL) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 4 { + df.SetTruncated() + return fmt.Errorf("EAPOL length %d too short", len(data)) + } + e.Version = data[0] + e.Type = EAPOLType(data[1]) + e.Length = binary.BigEndian.Uint16(data[2:4]) + e.BaseLayer = BaseLayer{data[:4], data[4:]} + return nil +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer +func (e *EAPOL) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + bytes, _ := b.PrependBytes(4) + bytes[0] = e.Version + bytes[1] = byte(e.Type) + binary.BigEndian.PutUint16(bytes[2:], e.Length) + return nil +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. 
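// Illustrative sketch (editorial addition, not part of the vendored files):
// an 802.1X authentication frame decodes as Ethernet -> EAPOL -> EAP with the
// layers vendored above, so the EAP code and type can be read directly from a
// captured frame. Assumes imports of "fmt", "github.com/google/gopacket" and
// "github.com/google/gopacket/layers"; frameBytes is assumed to be a raw
// Ethernet frame.
func inspectEAP(frameBytes []byte) {
	packet := gopacket.NewPacket(frameBytes, layers.LayerTypeEthernet, gopacket.Default)
	if layer := packet.Layer(layers.LayerTypeEAP); layer != nil {
		eap := layer.(*layers.EAP)
		fmt.Printf("EAP code=%d id=%d type=%d len=%d\n",
			eap.Code, eap.Id, eap.Type, eap.Length)
	}
}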
+func (e *EAPOL) CanDecode() gopacket.LayerClass { + return LayerTypeEAPOL +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (e *EAPOL) NextLayerType() gopacket.LayerType { + return e.Type.LayerType() +} + +func decodeEAPOL(data []byte, p gopacket.PacketBuilder) error { + e := &EAPOL{} + return decodingLayerDecoder(e, data, p) +} + +// EAPOLKeyDescriptorType is an enumeration of key descriptor types +// as specified by 802.1x in the EAPOL-Key frame +type EAPOLKeyDescriptorType uint8 + +// Enumeration of EAPOLKeyDescriptorType +const ( + EAPOLKeyDescriptorTypeRC4 EAPOLKeyDescriptorType = 1 + EAPOLKeyDescriptorTypeDot11 EAPOLKeyDescriptorType = 2 + EAPOLKeyDescriptorTypeWPA EAPOLKeyDescriptorType = 254 +) + +func (kdt EAPOLKeyDescriptorType) String() string { + switch kdt { + case EAPOLKeyDescriptorTypeRC4: + return "RC4" + case EAPOLKeyDescriptorTypeDot11: + return "802.11" + case EAPOLKeyDescriptorTypeWPA: + return "WPA" + default: + return fmt.Sprintf("unknown descriptor type %d", kdt) + } +} + +// EAPOLKeyDescriptorVersion is an enumeration of versions specifying the +// encryption algorithm for the key data and the authentication for the +// message integrity code (MIC) +type EAPOLKeyDescriptorVersion uint8 + +// Enumeration of EAPOLKeyDescriptorVersion +const ( + EAPOLKeyDescriptorVersionOther EAPOLKeyDescriptorVersion = 0 + EAPOLKeyDescriptorVersionRC4HMACMD5 EAPOLKeyDescriptorVersion = 1 + EAPOLKeyDescriptorVersionAESHMACSHA1 EAPOLKeyDescriptorVersion = 2 + EAPOLKeyDescriptorVersionAES128CMAC EAPOLKeyDescriptorVersion = 3 +) + +func (v EAPOLKeyDescriptorVersion) String() string { + switch v { + case EAPOLKeyDescriptorVersionOther: + return "Other" + case EAPOLKeyDescriptorVersionRC4HMACMD5: + return "RC4-HMAC-MD5" + case EAPOLKeyDescriptorVersionAESHMACSHA1: + return "AES-HMAC-SHA1-128" + case EAPOLKeyDescriptorVersionAES128CMAC: + return "AES-128-CMAC" + default: + return fmt.Sprintf("unknown version %d", v) + } +} + +// EAPOLKeyType is an enumeration of key derivation types describing +// the purpose of the keys being derived. +type EAPOLKeyType uint8 + +// Enumeration of EAPOLKeyType +const ( + EAPOLKeyTypeGroupSMK EAPOLKeyType = 0 + EAPOLKeyTypePairwise EAPOLKeyType = 1 +) + +func (kt EAPOLKeyType) String() string { + switch kt { + case EAPOLKeyTypeGroupSMK: + return "Group/SMK" + case EAPOLKeyTypePairwise: + return "Pairwise" + default: + return fmt.Sprintf("unknown key type %d", kt) + } +} + +// EAPOLKey defines an EAPOL-Key frame for 802.1x authentication +type EAPOLKey struct { + BaseLayer + KeyDescriptorType EAPOLKeyDescriptorType + KeyDescriptorVersion EAPOLKeyDescriptorVersion + KeyType EAPOLKeyType + KeyIndex uint8 + Install bool + KeyACK bool + KeyMIC bool + Secure bool + MICError bool + Request bool + HasEncryptedKeyData bool + SMKMessage bool + KeyLength uint16 + ReplayCounter uint64 + Nonce []byte + IV []byte + RSC uint64 + ID uint64 + MIC []byte + KeyDataLength uint16 + EncryptedKeyData []byte +} + +// LayerType returns LayerTypeEAPOLKey. +func (ek *EAPOLKey) LayerType() gopacket.LayerType { + return LayerTypeEAPOLKey +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (ek *EAPOLKey) CanDecode() gopacket.LayerType { + return LayerTypeEAPOLKey +} + +// NextLayerType returns layers.LayerTypeDot11InformationElement if the key +// data exists and is unencrypted, otherwise it does not expect a next layer. 
+func (ek *EAPOLKey) NextLayerType() gopacket.LayerType { + if !ek.HasEncryptedKeyData && ek.KeyDataLength > 0 { + return LayerTypeDot11InformationElement + } + return gopacket.LayerTypePayload +} + +const eapolKeyFrameLen = 95 + +// DecodeFromBytes decodes the given bytes into this layer. +func (ek *EAPOLKey) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < eapolKeyFrameLen { + df.SetTruncated() + return fmt.Errorf("EAPOLKey length %v too short, %v required", + len(data), eapolKeyFrameLen) + } + + ek.KeyDescriptorType = EAPOLKeyDescriptorType(data[0]) + + info := binary.BigEndian.Uint16(data[1:3]) + ek.KeyDescriptorVersion = EAPOLKeyDescriptorVersion(info & 0x0007) + ek.KeyType = EAPOLKeyType((info & 0x0008) >> 3) + ek.KeyIndex = uint8((info & 0x0030) >> 4) + ek.Install = (info & 0x0040) != 0 + ek.KeyACK = (info & 0x0080) != 0 + ek.KeyMIC = (info & 0x0100) != 0 + ek.Secure = (info & 0x0200) != 0 + ek.MICError = (info & 0x0400) != 0 + ek.Request = (info & 0x0800) != 0 + ek.HasEncryptedKeyData = (info & 0x1000) != 0 + ek.SMKMessage = (info & 0x2000) != 0 + + ek.KeyLength = binary.BigEndian.Uint16(data[3:5]) + ek.ReplayCounter = binary.BigEndian.Uint64(data[5:13]) + + ek.Nonce = data[13:45] + ek.IV = data[45:61] + ek.RSC = binary.BigEndian.Uint64(data[61:69]) + ek.ID = binary.BigEndian.Uint64(data[69:77]) + ek.MIC = data[77:93] + + ek.KeyDataLength = binary.BigEndian.Uint16(data[93:95]) + + totalLength := eapolKeyFrameLen + int(ek.KeyDataLength) + if len(data) < totalLength { + df.SetTruncated() + return fmt.Errorf("EAPOLKey data length %d too short, %d required", + len(data)-eapolKeyFrameLen, ek.KeyDataLength) + } + + if ek.HasEncryptedKeyData { + ek.EncryptedKeyData = data[eapolKeyFrameLen:totalLength] + ek.BaseLayer = BaseLayer{ + Contents: data[:totalLength], + Payload: data[totalLength:], + } + } else { + ek.BaseLayer = BaseLayer{ + Contents: data[:eapolKeyFrameLen], + Payload: data[eapolKeyFrameLen:], + } + } + + return nil +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. 
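To make the Key Information bit masks above concrete, here is a small sketch that decodes a synthetic, zeroed 95-byte EAPOL-Key body with only the descriptor type and a few flag bits set (not a captured frame):

```go
package main

import (
	"encoding/binary"
	"fmt"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	// Descriptor type 2 (802.11); key info 0x008a = version 2 (AES-HMAC-SHA1)
	// | pairwise key type | key ACK. The key data length stays 0.
	data := make([]byte, 95)
	data[0] = 2
	binary.BigEndian.PutUint16(data[1:3], 0x008a)

	var ek layers.EAPOLKey
	if err := ek.DecodeFromBytes(data, gopacket.NilDecodeFeedback); err != nil {
		panic(err)
	}
	fmt.Println(ek.KeyDescriptorVersion, ek.KeyType, ek.KeyACK)
	// AES-HMAC-SHA1-128 Pairwise true
}
```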
+func (ek *EAPOLKey) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + buf, err := b.PrependBytes(eapolKeyFrameLen + len(ek.EncryptedKeyData)) + if err != nil { + return err + } + + buf[0] = byte(ek.KeyDescriptorType) + + var info uint16 + info |= uint16(ek.KeyDescriptorVersion) + info |= uint16(ek.KeyType) << 3 + info |= uint16(ek.KeyIndex) << 4 + if ek.Install { + info |= 0x0040 + } + if ek.KeyACK { + info |= 0x0080 + } + if ek.KeyMIC { + info |= 0x0100 + } + if ek.Secure { + info |= 0x0200 + } + if ek.MICError { + info |= 0x0400 + } + if ek.Request { + info |= 0x0800 + } + if ek.HasEncryptedKeyData { + info |= 0x1000 + } + if ek.SMKMessage { + info |= 0x2000 + } + binary.BigEndian.PutUint16(buf[1:3], info) + + binary.BigEndian.PutUint16(buf[3:5], ek.KeyLength) + binary.BigEndian.PutUint64(buf[5:13], ek.ReplayCounter) + + copy(buf[13:45], ek.Nonce) + copy(buf[45:61], ek.IV) + binary.BigEndian.PutUint64(buf[61:69], ek.RSC) + binary.BigEndian.PutUint64(buf[69:77], ek.ID) + copy(buf[77:93], ek.MIC) + + binary.BigEndian.PutUint16(buf[93:95], ek.KeyDataLength) + if len(ek.EncryptedKeyData) > 0 { + copy(buf[95:95+len(ek.EncryptedKeyData)], ek.EncryptedKeyData) + } + + return nil +} + +func decodeEAPOLKey(data []byte, p gopacket.PacketBuilder) error { + ek := &EAPOLKey{} + return decodingLayerDecoder(ek, data, p) +} diff --git a/vendor/github.com/google/gopacket/layers/endpoints.go b/vendor/github.com/google/gopacket/layers/endpoints.go new file mode 100644 index 0000000000..4c91cc3324 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/endpoints.go @@ -0,0 +1,97 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "github.com/google/gopacket" + "net" + "strconv" +) + +var ( + // We use two different endpoint types for IPv4 vs IPv6 addresses, so that + // ordering with endpointA.LessThan(endpointB) sanely groups all IPv4 + // addresses and all IPv6 addresses, such that IPv6 > IPv4 for all addresses. 
+ EndpointIPv4 = gopacket.RegisterEndpointType(1, gopacket.EndpointTypeMetadata{Name: "IPv4", Formatter: func(b []byte) string { + return net.IP(b).String() + }}) + EndpointIPv6 = gopacket.RegisterEndpointType(2, gopacket.EndpointTypeMetadata{Name: "IPv6", Formatter: func(b []byte) string { + return net.IP(b).String() + }}) + + EndpointMAC = gopacket.RegisterEndpointType(3, gopacket.EndpointTypeMetadata{Name: "MAC", Formatter: func(b []byte) string { + return net.HardwareAddr(b).String() + }}) + EndpointTCPPort = gopacket.RegisterEndpointType(4, gopacket.EndpointTypeMetadata{Name: "TCP", Formatter: func(b []byte) string { + return strconv.Itoa(int(binary.BigEndian.Uint16(b))) + }}) + EndpointUDPPort = gopacket.RegisterEndpointType(5, gopacket.EndpointTypeMetadata{Name: "UDP", Formatter: func(b []byte) string { + return strconv.Itoa(int(binary.BigEndian.Uint16(b))) + }}) + EndpointSCTPPort = gopacket.RegisterEndpointType(6, gopacket.EndpointTypeMetadata{Name: "SCTP", Formatter: func(b []byte) string { + return strconv.Itoa(int(binary.BigEndian.Uint16(b))) + }}) + EndpointRUDPPort = gopacket.RegisterEndpointType(7, gopacket.EndpointTypeMetadata{Name: "RUDP", Formatter: func(b []byte) string { + return strconv.Itoa(int(b[0])) + }}) + EndpointUDPLitePort = gopacket.RegisterEndpointType(8, gopacket.EndpointTypeMetadata{Name: "UDPLite", Formatter: func(b []byte) string { + return strconv.Itoa(int(binary.BigEndian.Uint16(b))) + }}) + EndpointPPP = gopacket.RegisterEndpointType(9, gopacket.EndpointTypeMetadata{Name: "PPP", Formatter: func([]byte) string { + return "point" + }}) +) + +// NewIPEndpoint creates a new IP (v4 or v6) endpoint from a net.IP address. +// It returns gopacket.InvalidEndpoint if the IP address is invalid. +func NewIPEndpoint(a net.IP) gopacket.Endpoint { + ipv4 := a.To4() + if ipv4 != nil { + return gopacket.NewEndpoint(EndpointIPv4, []byte(ipv4)) + } + + ipv6 := a.To16() + if ipv6 != nil { + return gopacket.NewEndpoint(EndpointIPv6, []byte(ipv6)) + } + + return gopacket.InvalidEndpoint +} + +// NewMACEndpoint returns a new MAC address endpoint. +func NewMACEndpoint(a net.HardwareAddr) gopacket.Endpoint { + return gopacket.NewEndpoint(EndpointMAC, []byte(a)) +} +func newPortEndpoint(t gopacket.EndpointType, p uint16) gopacket.Endpoint { + return gopacket.NewEndpoint(t, []byte{byte(p >> 8), byte(p)}) +} + +// NewTCPPortEndpoint returns an endpoint based on a TCP port. +func NewTCPPortEndpoint(p TCPPort) gopacket.Endpoint { + return newPortEndpoint(EndpointTCPPort, uint16(p)) +} + +// NewUDPPortEndpoint returns an endpoint based on a UDP port. +func NewUDPPortEndpoint(p UDPPort) gopacket.Endpoint { + return newPortEndpoint(EndpointUDPPort, uint16(p)) +} + +// NewSCTPPortEndpoint returns an endpoint based on a SCTP port. +func NewSCTPPortEndpoint(p SCTPPort) gopacket.Endpoint { + return newPortEndpoint(EndpointSCTPPort, uint16(p)) +} + +// NewRUDPPortEndpoint returns an endpoint based on a RUDP port. +func NewRUDPPortEndpoint(p RUDPPort) gopacket.Endpoint { + return gopacket.NewEndpoint(EndpointRUDPPort, []byte{byte(p)}) +} + +// NewUDPLitePortEndpoint returns an endpoint based on a UDPLite port. 
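A brief usage sketch for the endpoint constructors registered above, with arbitrary example addresses; as the comment at the top of the var block explains, the separate IPv4 and IPv6 endpoint types keep LessThan ordering sane:

```go
package main

import (
	"fmt"
	"net"

	"github.com/google/gopacket/layers"
)

func main() {
	v4 := layers.NewIPEndpoint(net.ParseIP("192.0.2.1"))   // EndpointIPv4
	v6 := layers.NewIPEndpoint(net.ParseIP("2001:db8::1")) // EndpointIPv6
	tcp := layers.NewTCPPortEndpoint(layers.TCPPort(443))  // EndpointTCPPort

	// IPv4 endpoints sort before IPv6 endpoints regardless of address bytes.
	fmt.Println(v4, v6, tcp, v4.LessThan(v6)) // 192.0.2.1 2001:db8::1 443 true
}
```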
+func NewUDPLitePortEndpoint(p UDPLitePort) gopacket.Endpoint { + return newPortEndpoint(EndpointUDPLitePort, uint16(p)) +} diff --git a/vendor/github.com/google/gopacket/layers/enums.go b/vendor/github.com/google/gopacket/layers/enums.go new file mode 100644 index 0000000000..8427bdaf47 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/enums.go @@ -0,0 +1,443 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// Copyright 2009-2011 Andreas Krennmair. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "fmt" + "runtime" + + "github.com/google/gopacket" +) + +// EnumMetadata keeps track of a set of metadata for each enumeration value +// for protocol enumerations. +type EnumMetadata struct { + // DecodeWith is the decoder to use to decode this protocol's data. + DecodeWith gopacket.Decoder + // Name is the name of the enumeration value. + Name string + // LayerType is the layer type implied by the given enum. + LayerType gopacket.LayerType +} + +// EthernetType is an enumeration of ethernet type values, and acts as a decoder +// for any type it supports. +type EthernetType uint16 + +const ( + // EthernetTypeLLC is not an actual ethernet type. It is instead a + // placeholder we use in Ethernet frames that use the 802.3 standard of + // srcmac|dstmac|length|LLC instead of srcmac|dstmac|ethertype. + EthernetTypeLLC EthernetType = 0 + EthernetTypeIPv4 EthernetType = 0x0800 + EthernetTypeARP EthernetType = 0x0806 + EthernetTypeIPv6 EthernetType = 0x86DD + EthernetTypeCiscoDiscovery EthernetType = 0x2000 + EthernetTypeNortelDiscovery EthernetType = 0x01a2 + EthernetTypeTransparentEthernetBridging EthernetType = 0x6558 + EthernetTypeDot1Q EthernetType = 0x8100 + EthernetTypePPP EthernetType = 0x880b + EthernetTypePPPoEDiscovery EthernetType = 0x8863 + EthernetTypePPPoESession EthernetType = 0x8864 + EthernetTypeMPLSUnicast EthernetType = 0x8847 + EthernetTypeMPLSMulticast EthernetType = 0x8848 + EthernetTypeEAPOL EthernetType = 0x888e + EthernetTypeERSPAN EthernetType = 0x88be + EthernetTypeQinQ EthernetType = 0x88a8 + EthernetTypeLinkLayerDiscovery EthernetType = 0x88cc + EthernetTypeEthernetCTP EthernetType = 0x9000 +) + +// IPProtocol is an enumeration of IP protocol values, and acts as a decoder +// for any type it supports. +type IPProtocol uint8 + +const ( + IPProtocolIPv6HopByHop IPProtocol = 0 + IPProtocolICMPv4 IPProtocol = 1 + IPProtocolIGMP IPProtocol = 2 + IPProtocolIPv4 IPProtocol = 4 + IPProtocolTCP IPProtocol = 6 + IPProtocolUDP IPProtocol = 17 + IPProtocolRUDP IPProtocol = 27 + IPProtocolIPv6 IPProtocol = 41 + IPProtocolIPv6Routing IPProtocol = 43 + IPProtocolIPv6Fragment IPProtocol = 44 + IPProtocolGRE IPProtocol = 47 + IPProtocolESP IPProtocol = 50 + IPProtocolAH IPProtocol = 51 + IPProtocolICMPv6 IPProtocol = 58 + IPProtocolNoNextHeader IPProtocol = 59 + IPProtocolIPv6Destination IPProtocol = 60 + IPProtocolOSPF IPProtocol = 89 + IPProtocolIPIP IPProtocol = 94 + IPProtocolEtherIP IPProtocol = 97 + IPProtocolVRRP IPProtocol = 112 + IPProtocolSCTP IPProtocol = 132 + IPProtocolUDPLite IPProtocol = 136 + IPProtocolMPLSInIP IPProtocol = 137 +) + +// LinkType is an enumeration of link types, and acts as a decoder for any +// link type it supports. 
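These enum types act as decoders in their own right: once the metadata tables defined further down are initialised, each value resolves its display name and the layer type it decodes into. A small sketch:

```go
package main

import (
	"fmt"

	"github.com/google/gopacket/layers"
)

func main() {
	fmt.Println(layers.EthernetTypeIPv4, layers.EthernetTypeIPv4.LayerType() == layers.LayerTypeIPv4) // IPv4 true
	fmt.Println(layers.IPProtocolTCP, layers.IPProtocolTCP.LayerType() == layers.LayerTypeTCP)        // TCP true
}
```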
+type LinkType uint8 + +const ( + // According to pcap-linktype(7) and http://www.tcpdump.org/linktypes.html + LinkTypeNull LinkType = 0 + LinkTypeEthernet LinkType = 1 + LinkTypeAX25 LinkType = 3 + LinkTypeTokenRing LinkType = 6 + LinkTypeArcNet LinkType = 7 + LinkTypeSLIP LinkType = 8 + LinkTypePPP LinkType = 9 + LinkTypeFDDI LinkType = 10 + LinkTypePPP_HDLC LinkType = 50 + LinkTypePPPEthernet LinkType = 51 + LinkTypeATM_RFC1483 LinkType = 100 + LinkTypeRaw LinkType = 101 + LinkTypeC_HDLC LinkType = 104 + LinkTypeIEEE802_11 LinkType = 105 + LinkTypeFRelay LinkType = 107 + LinkTypeLoop LinkType = 108 + LinkTypeLinuxSLL LinkType = 113 + LinkTypeLTalk LinkType = 114 + LinkTypePFLog LinkType = 117 + LinkTypePrismHeader LinkType = 119 + LinkTypeIPOverFC LinkType = 122 + LinkTypeSunATM LinkType = 123 + LinkTypeIEEE80211Radio LinkType = 127 + LinkTypeARCNetLinux LinkType = 129 + LinkTypeIPOver1394 LinkType = 138 + LinkTypeMTP2Phdr LinkType = 139 + LinkTypeMTP2 LinkType = 140 + LinkTypeMTP3 LinkType = 141 + LinkTypeSCCP LinkType = 142 + LinkTypeDOCSIS LinkType = 143 + LinkTypeLinuxIRDA LinkType = 144 + LinkTypeLinuxLAPD LinkType = 177 + LinkTypeLinuxUSB LinkType = 220 + LinkTypeFC2 LinkType = 224 + LinkTypeFC2Framed LinkType = 225 + LinkTypeIPv4 LinkType = 228 + LinkTypeIPv6 LinkType = 229 +) + +// PPPoECode is the PPPoE code enum, taken from http://tools.ietf.org/html/rfc2516 +type PPPoECode uint8 + +const ( + PPPoECodePADI PPPoECode = 0x09 + PPPoECodePADO PPPoECode = 0x07 + PPPoECodePADR PPPoECode = 0x19 + PPPoECodePADS PPPoECode = 0x65 + PPPoECodePADT PPPoECode = 0xA7 + PPPoECodeSession PPPoECode = 0x00 +) + +// PPPType is an enumeration of PPP type values, and acts as a decoder for any +// type it supports. +type PPPType uint16 + +const ( + PPPTypeIPv4 PPPType = 0x0021 + PPPTypeIPv6 PPPType = 0x0057 + PPPTypeMPLSUnicast PPPType = 0x0281 + PPPTypeMPLSMulticast PPPType = 0x0283 +) + +// SCTPChunkType is an enumeration of chunk types inside SCTP packets. +type SCTPChunkType uint8 + +const ( + SCTPChunkTypeData SCTPChunkType = 0 + SCTPChunkTypeInit SCTPChunkType = 1 + SCTPChunkTypeInitAck SCTPChunkType = 2 + SCTPChunkTypeSack SCTPChunkType = 3 + SCTPChunkTypeHeartbeat SCTPChunkType = 4 + SCTPChunkTypeHeartbeatAck SCTPChunkType = 5 + SCTPChunkTypeAbort SCTPChunkType = 6 + SCTPChunkTypeShutdown SCTPChunkType = 7 + SCTPChunkTypeShutdownAck SCTPChunkType = 8 + SCTPChunkTypeError SCTPChunkType = 9 + SCTPChunkTypeCookieEcho SCTPChunkType = 10 + SCTPChunkTypeCookieAck SCTPChunkType = 11 + SCTPChunkTypeShutdownComplete SCTPChunkType = 14 +) + +// FDDIFrameControl is an enumeration of FDDI frame control bytes. +type FDDIFrameControl uint8 + +const ( + FDDIFrameControlLLC FDDIFrameControl = 0x50 +) + +// EAPOLType is an enumeration of EAPOL packet types. +type EAPOLType uint8 + +const ( + EAPOLTypeEAP EAPOLType = 0 + EAPOLTypeStart EAPOLType = 1 + EAPOLTypeLogOff EAPOLType = 2 + EAPOLTypeKey EAPOLType = 3 + EAPOLTypeASFAlert EAPOLType = 4 +) + +// ProtocolFamily is the set of values defined as PF_* in sys/socket.h +type ProtocolFamily uint8 + +const ( + ProtocolFamilyIPv4 ProtocolFamily = 2 + // BSDs use different values for INET6... glory be. These values taken from + // tcpdump 4.3.0. + ProtocolFamilyIPv6BSD ProtocolFamily = 24 + ProtocolFamilyIPv6FreeBSD ProtocolFamily = 28 + ProtocolFamilyIPv6Darwin ProtocolFamily = 30 + ProtocolFamilyIPv6Linux ProtocolFamily = 10 +) + +// Dot11Type is a combination of IEEE 802.11 frame's Type and Subtype fields. 
+// By combining these two fields together into a single type, we're able to +// provide a String function that correctly displays the subtype given the +// top-level type. +// +// If you just care about the top-level type, use the MainType function. +type Dot11Type uint8 + +// MainType strips the subtype information from the given type, +// returning just the overarching type (Mgmt, Ctrl, Data, Reserved). +func (d Dot11Type) MainType() Dot11Type { + return d & dot11TypeMask +} + +func (d Dot11Type) QOS() bool { + return d&dot11QOSMask == Dot11TypeDataQOSData +} + +const ( + Dot11TypeMgmt Dot11Type = 0x00 + Dot11TypeCtrl Dot11Type = 0x01 + Dot11TypeData Dot11Type = 0x02 + Dot11TypeReserved Dot11Type = 0x03 + dot11TypeMask = 0x03 + dot11QOSMask = 0x23 + + // The following are type/subtype conglomerations. + + // Management + Dot11TypeMgmtAssociationReq Dot11Type = 0x00 + Dot11TypeMgmtAssociationResp Dot11Type = 0x04 + Dot11TypeMgmtReassociationReq Dot11Type = 0x08 + Dot11TypeMgmtReassociationResp Dot11Type = 0x0c + Dot11TypeMgmtProbeReq Dot11Type = 0x10 + Dot11TypeMgmtProbeResp Dot11Type = 0x14 + Dot11TypeMgmtMeasurementPilot Dot11Type = 0x18 + Dot11TypeMgmtBeacon Dot11Type = 0x20 + Dot11TypeMgmtATIM Dot11Type = 0x24 + Dot11TypeMgmtDisassociation Dot11Type = 0x28 + Dot11TypeMgmtAuthentication Dot11Type = 0x2c + Dot11TypeMgmtDeauthentication Dot11Type = 0x30 + Dot11TypeMgmtAction Dot11Type = 0x34 + Dot11TypeMgmtActionNoAck Dot11Type = 0x38 + + // Control + Dot11TypeCtrlWrapper Dot11Type = 0x1d + Dot11TypeCtrlBlockAckReq Dot11Type = 0x21 + Dot11TypeCtrlBlockAck Dot11Type = 0x25 + Dot11TypeCtrlPowersavePoll Dot11Type = 0x29 + Dot11TypeCtrlRTS Dot11Type = 0x2d + Dot11TypeCtrlCTS Dot11Type = 0x31 + Dot11TypeCtrlAck Dot11Type = 0x35 + Dot11TypeCtrlCFEnd Dot11Type = 0x39 + Dot11TypeCtrlCFEndAck Dot11Type = 0x3d + + // Data + Dot11TypeDataCFAck Dot11Type = 0x06 + Dot11TypeDataCFPoll Dot11Type = 0x0a + Dot11TypeDataCFAckPoll Dot11Type = 0x0e + Dot11TypeDataNull Dot11Type = 0x12 + Dot11TypeDataCFAckNoData Dot11Type = 0x16 + Dot11TypeDataCFPollNoData Dot11Type = 0x1a + Dot11TypeDataCFAckPollNoData Dot11Type = 0x1e + Dot11TypeDataQOSData Dot11Type = 0x22 + Dot11TypeDataQOSDataCFAck Dot11Type = 0x26 + Dot11TypeDataQOSDataCFPoll Dot11Type = 0x2a + Dot11TypeDataQOSDataCFAckPoll Dot11Type = 0x2e + Dot11TypeDataQOSNull Dot11Type = 0x32 + Dot11TypeDataQOSCFPollNoData Dot11Type = 0x3a + Dot11TypeDataQOSCFAckPollNoData Dot11Type = 0x3e +) + +// Decode a raw v4 or v6 IP packet. +func decodeIPv4or6(data []byte, p gopacket.PacketBuilder) error { + version := data[0] >> 4 + switch version { + case 4: + return decodeIPv4(data, p) + case 6: + return decodeIPv6(data, p) + } + return fmt.Errorf("Invalid IP packet version %v", version) +} + +func initActualTypeData() { + // Each of the XXXTypeMetadata arrays contains mappings of how to handle enum + // values for various enum types in gopacket/layers. + // These arrays are actually created by gen2.go and stored in + // enums_generated.go. + // + // So, EthernetTypeMetadata[2] contains information on how to handle EthernetType + // 2, including which name to give it and which decoder to use to decode + // packet data of that type. These arrays are filled by default with all of the + // protocols gopacket/layers knows how to handle, but users of the library can + // add new decoders or override existing ones. 
For example, if you write a better + // TCP decoder, you can override IPProtocolMetadata[IPProtocolTCP].DecodeWith + // with your new decoder, and all gopacket/layers decoding will use your new + // decoder whenever they encounter that IPProtocol. + + // Here we link up all enumerations with their respective names and decoders. + EthernetTypeMetadata[EthernetTypeLLC] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeLLC), Name: "LLC", LayerType: LayerTypeLLC} + EthernetTypeMetadata[EthernetTypeIPv4] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4), Name: "IPv4", LayerType: LayerTypeIPv4} + EthernetTypeMetadata[EthernetTypeIPv6] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6), Name: "IPv6", LayerType: LayerTypeIPv6} + EthernetTypeMetadata[EthernetTypeARP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeARP), Name: "ARP", LayerType: LayerTypeARP} + EthernetTypeMetadata[EthernetTypeDot1Q] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot1Q), Name: "Dot1Q", LayerType: LayerTypeDot1Q} + EthernetTypeMetadata[EthernetTypePPP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodePPP), Name: "PPP", LayerType: LayerTypePPP} + EthernetTypeMetadata[EthernetTypePPPoEDiscovery] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodePPPoE), Name: "PPPoEDiscovery", LayerType: LayerTypePPPoE} + EthernetTypeMetadata[EthernetTypePPPoESession] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodePPPoE), Name: "PPPoESession", LayerType: LayerTypePPPoE} + EthernetTypeMetadata[EthernetTypeEthernetCTP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeEthernetCTP), Name: "EthernetCTP", LayerType: LayerTypeEthernetCTP} + EthernetTypeMetadata[EthernetTypeCiscoDiscovery] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeCiscoDiscovery), Name: "CiscoDiscovery", LayerType: LayerTypeCiscoDiscovery} + EthernetTypeMetadata[EthernetTypeNortelDiscovery] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeNortelDiscovery), Name: "NortelDiscovery", LayerType: LayerTypeNortelDiscovery} + EthernetTypeMetadata[EthernetTypeLinkLayerDiscovery] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeLinkLayerDiscovery), Name: "LinkLayerDiscovery", LayerType: LayerTypeLinkLayerDiscovery} + EthernetTypeMetadata[EthernetTypeMPLSUnicast] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeMPLS), Name: "MPLSUnicast", LayerType: LayerTypeMPLS} + EthernetTypeMetadata[EthernetTypeMPLSMulticast] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeMPLS), Name: "MPLSMulticast", LayerType: LayerTypeMPLS} + EthernetTypeMetadata[EthernetTypeEAPOL] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeEAPOL), Name: "EAPOL", LayerType: LayerTypeEAPOL} + EthernetTypeMetadata[EthernetTypeQinQ] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot1Q), Name: "Dot1Q", LayerType: LayerTypeDot1Q} + EthernetTypeMetadata[EthernetTypeTransparentEthernetBridging] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeEthernet), Name: "TransparentEthernetBridging", LayerType: LayerTypeEthernet} + EthernetTypeMetadata[EthernetTypeERSPAN] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeERSPANII), Name: "ERSPAN Type II", LayerType: LayerTypeERSPANII} + + IPProtocolMetadata[IPProtocolIPv4] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4), Name: "IPv4", LayerType: LayerTypeIPv4} + IPProtocolMetadata[IPProtocolTCP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeTCP), Name: "TCP", LayerType: LayerTypeTCP} + IPProtocolMetadata[IPProtocolUDP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeUDP), Name: 
"UDP", LayerType: LayerTypeUDP} + IPProtocolMetadata[IPProtocolICMPv4] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeICMPv4), Name: "ICMPv4", LayerType: LayerTypeICMPv4} + IPProtocolMetadata[IPProtocolICMPv6] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeICMPv6), Name: "ICMPv6", LayerType: LayerTypeICMPv6} + IPProtocolMetadata[IPProtocolSCTP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTP), Name: "SCTP", LayerType: LayerTypeSCTP} + IPProtocolMetadata[IPProtocolIPv6] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6), Name: "IPv6", LayerType: LayerTypeIPv6} + IPProtocolMetadata[IPProtocolIPIP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4), Name: "IPv4", LayerType: LayerTypeIPv4} + IPProtocolMetadata[IPProtocolEtherIP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeEtherIP), Name: "EtherIP", LayerType: LayerTypeEtherIP} + IPProtocolMetadata[IPProtocolRUDP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeRUDP), Name: "RUDP", LayerType: LayerTypeRUDP} + IPProtocolMetadata[IPProtocolGRE] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeGRE), Name: "GRE", LayerType: LayerTypeGRE} + IPProtocolMetadata[IPProtocolIPv6HopByHop] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6HopByHop), Name: "IPv6HopByHop", LayerType: LayerTypeIPv6HopByHop} + IPProtocolMetadata[IPProtocolIPv6Routing] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6Routing), Name: "IPv6Routing", LayerType: LayerTypeIPv6Routing} + IPProtocolMetadata[IPProtocolIPv6Fragment] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6Fragment), Name: "IPv6Fragment", LayerType: LayerTypeIPv6Fragment} + IPProtocolMetadata[IPProtocolIPv6Destination] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6Destination), Name: "IPv6Destination", LayerType: LayerTypeIPv6Destination} + IPProtocolMetadata[IPProtocolOSPF] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeOSPF), Name: "OSPF", LayerType: LayerTypeOSPF} + IPProtocolMetadata[IPProtocolAH] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPSecAH), Name: "IPSecAH", LayerType: LayerTypeIPSecAH} + IPProtocolMetadata[IPProtocolESP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPSecESP), Name: "IPSecESP", LayerType: LayerTypeIPSecESP} + IPProtocolMetadata[IPProtocolUDPLite] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeUDPLite), Name: "UDPLite", LayerType: LayerTypeUDPLite} + IPProtocolMetadata[IPProtocolMPLSInIP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeMPLS), Name: "MPLS", LayerType: LayerTypeMPLS} + IPProtocolMetadata[IPProtocolNoNextHeader] = EnumMetadata{DecodeWith: gopacket.DecodePayload, Name: "NoNextHeader", LayerType: gopacket.LayerTypePayload} + IPProtocolMetadata[IPProtocolIGMP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIGMP), Name: "IGMP", LayerType: LayerTypeIGMP} + IPProtocolMetadata[IPProtocolVRRP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeVRRP), Name: "VRRP", LayerType: LayerTypeVRRP} + + SCTPChunkTypeMetadata[SCTPChunkTypeData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPData), Name: "Data"} + SCTPChunkTypeMetadata[SCTPChunkTypeInit] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPInit), Name: "Init"} + SCTPChunkTypeMetadata[SCTPChunkTypeInitAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPInit), Name: "InitAck"} + SCTPChunkTypeMetadata[SCTPChunkTypeSack] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPSack), Name: "Sack"} + SCTPChunkTypeMetadata[SCTPChunkTypeHeartbeat] = EnumMetadata{DecodeWith: 
gopacket.DecodeFunc(decodeSCTPHeartbeat), Name: "Heartbeat"} + SCTPChunkTypeMetadata[SCTPChunkTypeHeartbeatAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPHeartbeat), Name: "HeartbeatAck"} + SCTPChunkTypeMetadata[SCTPChunkTypeAbort] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPError), Name: "Abort"} + SCTPChunkTypeMetadata[SCTPChunkTypeError] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPError), Name: "Error"} + SCTPChunkTypeMetadata[SCTPChunkTypeShutdown] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPShutdown), Name: "Shutdown"} + SCTPChunkTypeMetadata[SCTPChunkTypeShutdownAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPShutdownAck), Name: "ShutdownAck"} + SCTPChunkTypeMetadata[SCTPChunkTypeCookieEcho] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPCookieEcho), Name: "CookieEcho"} + SCTPChunkTypeMetadata[SCTPChunkTypeCookieAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPEmptyLayer), Name: "CookieAck"} + SCTPChunkTypeMetadata[SCTPChunkTypeShutdownComplete] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPEmptyLayer), Name: "ShutdownComplete"} + + PPPTypeMetadata[PPPTypeIPv4] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4), Name: "IPv4"} + PPPTypeMetadata[PPPTypeIPv6] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6), Name: "IPv6"} + PPPTypeMetadata[PPPTypeMPLSUnicast] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeMPLS), Name: "MPLSUnicast"} + PPPTypeMetadata[PPPTypeMPLSMulticast] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeMPLS), Name: "MPLSMulticast"} + + PPPoECodeMetadata[PPPoECodeSession] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodePPP), Name: "PPP"} + + LinkTypeMetadata[LinkTypeEthernet] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeEthernet), Name: "Ethernet"} + LinkTypeMetadata[LinkTypePPP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodePPP), Name: "PPP"} + LinkTypeMetadata[LinkTypeFDDI] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeFDDI), Name: "FDDI"} + LinkTypeMetadata[LinkTypeNull] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeLoopback), Name: "Null"} + LinkTypeMetadata[LinkTypeIEEE802_11] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11), Name: "Dot11"} + LinkTypeMetadata[LinkTypeLoop] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeLoopback), Name: "Loop"} + LinkTypeMetadata[LinkTypeIEEE802_11] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11), Name: "802.11"} + LinkTypeMetadata[LinkTypeRaw] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4or6), Name: "Raw"} + // See https://github.com/the-tcpdump-group/libpcap/blob/170f717e6e818cdc4bcbbfd906b63088eaa88fa0/pcap/dlt.h#L85 + // Or https://github.com/wireshark/wireshark/blob/854cfe53efe44080609c78053ecfb2342ad84a08/wiretap/pcap-common.c#L508 + if runtime.GOOS == "openbsd" { + LinkTypeMetadata[14] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4or6), Name: "Raw"} + } else { + LinkTypeMetadata[12] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4or6), Name: "Raw"} + } + LinkTypeMetadata[LinkTypePFLog] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodePFLog), Name: "PFLog"} + LinkTypeMetadata[LinkTypeIEEE80211Radio] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeRadioTap), Name: "RadioTap"} + LinkTypeMetadata[LinkTypeLinuxUSB] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeUSB), Name: "USB"} + LinkTypeMetadata[LinkTypeLinuxSLL] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeLinuxSLL), Name: "Linux 
SLL"} + LinkTypeMetadata[LinkTypePrismHeader] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodePrismHeader), Name: "Prism"} + + FDDIFrameControlMetadata[FDDIFrameControlLLC] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeLLC), Name: "LLC"} + + EAPOLTypeMetadata[EAPOLTypeEAP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeEAP), Name: "EAP", LayerType: LayerTypeEAP} + EAPOLTypeMetadata[EAPOLTypeKey] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeEAPOLKey), Name: "EAPOLKey", LayerType: LayerTypeEAPOLKey} + + ProtocolFamilyMetadata[ProtocolFamilyIPv4] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4), Name: "IPv4", LayerType: LayerTypeIPv4} + ProtocolFamilyMetadata[ProtocolFamilyIPv6BSD] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6), Name: "IPv6", LayerType: LayerTypeIPv6} + ProtocolFamilyMetadata[ProtocolFamilyIPv6FreeBSD] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6), Name: "IPv6", LayerType: LayerTypeIPv6} + ProtocolFamilyMetadata[ProtocolFamilyIPv6Darwin] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6), Name: "IPv6", LayerType: LayerTypeIPv6} + ProtocolFamilyMetadata[ProtocolFamilyIPv6Linux] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6), Name: "IPv6", LayerType: LayerTypeIPv6} + + Dot11TypeMetadata[Dot11TypeMgmtAssociationReq] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtAssociationReq), Name: "MgmtAssociationReq", LayerType: LayerTypeDot11MgmtAssociationReq} + Dot11TypeMetadata[Dot11TypeMgmtAssociationResp] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtAssociationResp), Name: "MgmtAssociationResp", LayerType: LayerTypeDot11MgmtAssociationResp} + Dot11TypeMetadata[Dot11TypeMgmtReassociationReq] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtReassociationReq), Name: "MgmtReassociationReq", LayerType: LayerTypeDot11MgmtReassociationReq} + Dot11TypeMetadata[Dot11TypeMgmtReassociationResp] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtReassociationResp), Name: "MgmtReassociationResp", LayerType: LayerTypeDot11MgmtReassociationResp} + Dot11TypeMetadata[Dot11TypeMgmtProbeReq] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtProbeReq), Name: "MgmtProbeReq", LayerType: LayerTypeDot11MgmtProbeReq} + Dot11TypeMetadata[Dot11TypeMgmtProbeResp] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtProbeResp), Name: "MgmtProbeResp", LayerType: LayerTypeDot11MgmtProbeResp} + Dot11TypeMetadata[Dot11TypeMgmtMeasurementPilot] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtMeasurementPilot), Name: "MgmtMeasurementPilot", LayerType: LayerTypeDot11MgmtMeasurementPilot} + Dot11TypeMetadata[Dot11TypeMgmtBeacon] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtBeacon), Name: "MgmtBeacon", LayerType: LayerTypeDot11MgmtBeacon} + Dot11TypeMetadata[Dot11TypeMgmtATIM] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtATIM), Name: "MgmtATIM", LayerType: LayerTypeDot11MgmtATIM} + Dot11TypeMetadata[Dot11TypeMgmtDisassociation] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtDisassociation), Name: "MgmtDisassociation", LayerType: LayerTypeDot11MgmtDisassociation} + Dot11TypeMetadata[Dot11TypeMgmtAuthentication] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtAuthentication), Name: "MgmtAuthentication", LayerType: LayerTypeDot11MgmtAuthentication} + Dot11TypeMetadata[Dot11TypeMgmtDeauthentication] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtDeauthentication), Name: 
"MgmtDeauthentication", LayerType: LayerTypeDot11MgmtDeauthentication} + Dot11TypeMetadata[Dot11TypeMgmtAction] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtAction), Name: "MgmtAction", LayerType: LayerTypeDot11MgmtAction} + Dot11TypeMetadata[Dot11TypeMgmtActionNoAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtActionNoAck), Name: "MgmtActionNoAck", LayerType: LayerTypeDot11MgmtActionNoAck} + Dot11TypeMetadata[Dot11TypeCtrl] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11Ctrl), Name: "Ctrl", LayerType: LayerTypeDot11Ctrl} + Dot11TypeMetadata[Dot11TypeCtrlWrapper] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11Ctrl), Name: "CtrlWrapper", LayerType: LayerTypeDot11Ctrl} + Dot11TypeMetadata[Dot11TypeCtrlBlockAckReq] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlBlockAckReq), Name: "CtrlBlockAckReq", LayerType: LayerTypeDot11CtrlBlockAckReq} + Dot11TypeMetadata[Dot11TypeCtrlBlockAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlBlockAck), Name: "CtrlBlockAck", LayerType: LayerTypeDot11CtrlBlockAck} + Dot11TypeMetadata[Dot11TypeCtrlPowersavePoll] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlPowersavePoll), Name: "CtrlPowersavePoll", LayerType: LayerTypeDot11CtrlPowersavePoll} + Dot11TypeMetadata[Dot11TypeCtrlRTS] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlRTS), Name: "CtrlRTS", LayerType: LayerTypeDot11CtrlRTS} + Dot11TypeMetadata[Dot11TypeCtrlCTS] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlCTS), Name: "CtrlCTS", LayerType: LayerTypeDot11CtrlCTS} + Dot11TypeMetadata[Dot11TypeCtrlAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlAck), Name: "CtrlAck", LayerType: LayerTypeDot11CtrlAck} + Dot11TypeMetadata[Dot11TypeCtrlCFEnd] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlCFEnd), Name: "CtrlCFEnd", LayerType: LayerTypeDot11CtrlCFEnd} + Dot11TypeMetadata[Dot11TypeCtrlCFEndAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlCFEndAck), Name: "CtrlCFEndAck", LayerType: LayerTypeDot11CtrlCFEndAck} + Dot11TypeMetadata[Dot11TypeData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11Data), Name: "Data", LayerType: LayerTypeDot11Data} + Dot11TypeMetadata[Dot11TypeDataCFAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataCFAck), Name: "DataCFAck", LayerType: LayerTypeDot11DataCFAck} + Dot11TypeMetadata[Dot11TypeDataCFPoll] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataCFPoll), Name: "DataCFPoll", LayerType: LayerTypeDot11DataCFPoll} + Dot11TypeMetadata[Dot11TypeDataCFAckPoll] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataCFAckPoll), Name: "DataCFAckPoll", LayerType: LayerTypeDot11DataCFAckPoll} + Dot11TypeMetadata[Dot11TypeDataNull] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataNull), Name: "DataNull", LayerType: LayerTypeDot11DataNull} + Dot11TypeMetadata[Dot11TypeDataCFAckNoData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataCFAckNoData), Name: "DataCFAckNoData", LayerType: LayerTypeDot11DataCFAckNoData} + Dot11TypeMetadata[Dot11TypeDataCFPollNoData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataCFPollNoData), Name: "DataCFPollNoData", LayerType: LayerTypeDot11DataCFPollNoData} + Dot11TypeMetadata[Dot11TypeDataCFAckPollNoData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataCFAckPollNoData), Name: "DataCFAckPollNoData", LayerType: LayerTypeDot11DataCFAckPollNoData} + 
Dot11TypeMetadata[Dot11TypeDataQOSData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataQOSData), Name: "DataQOSData", LayerType: LayerTypeDot11DataQOSData} + Dot11TypeMetadata[Dot11TypeDataQOSDataCFAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataQOSDataCFAck), Name: "DataQOSDataCFAck", LayerType: LayerTypeDot11DataQOSDataCFAck} + Dot11TypeMetadata[Dot11TypeDataQOSDataCFPoll] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataQOSDataCFPoll), Name: "DataQOSDataCFPoll", LayerType: LayerTypeDot11DataQOSDataCFPoll} + Dot11TypeMetadata[Dot11TypeDataQOSDataCFAckPoll] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataQOSDataCFAckPoll), Name: "DataQOSDataCFAckPoll", LayerType: LayerTypeDot11DataQOSDataCFAckPoll} + Dot11TypeMetadata[Dot11TypeDataQOSNull] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataQOSNull), Name: "DataQOSNull", LayerType: LayerTypeDot11DataQOSNull} + Dot11TypeMetadata[Dot11TypeDataQOSCFPollNoData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataQOSCFPollNoData), Name: "DataQOSCFPollNoData", LayerType: LayerTypeDot11DataQOSCFPollNoData} + Dot11TypeMetadata[Dot11TypeDataQOSCFAckPollNoData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataQOSCFAckPollNoData), Name: "DataQOSCFAckPollNoData", LayerType: LayerTypeDot11DataQOSCFAckPollNoData} + + USBTransportTypeMetadata[USBTransportTypeInterrupt] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeUSBInterrupt), Name: "Interrupt", LayerType: LayerTypeUSBInterrupt} + USBTransportTypeMetadata[USBTransportTypeControl] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeUSBControl), Name: "Control", LayerType: LayerTypeUSBControl} + USBTransportTypeMetadata[USBTransportTypeBulk] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeUSBBulk), Name: "Bulk", LayerType: LayerTypeUSBBulk} +} diff --git a/vendor/github.com/google/gopacket/layers/enums_generated.go b/vendor/github.com/google/gopacket/layers/enums_generated.go new file mode 100644 index 0000000000..bf77aac501 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/enums_generated.go @@ -0,0 +1,434 @@ +// Copyright 2012 Google, Inc. All rights reserved. + +package layers + +// Created by gen2.go, don't edit manually +// Generated at 2017-10-23 10:20:24.458771856 -0600 MDT m=+0.001159033 + +import ( + "fmt" + + "github.com/google/gopacket" +) + +func init() { + initUnknownTypesForLinkType() + initUnknownTypesForEthernetType() + initUnknownTypesForPPPType() + initUnknownTypesForIPProtocol() + initUnknownTypesForSCTPChunkType() + initUnknownTypesForPPPoECode() + initUnknownTypesForFDDIFrameControl() + initUnknownTypesForEAPOLType() + initUnknownTypesForProtocolFamily() + initUnknownTypesForDot11Type() + initUnknownTypesForUSBTransportType() + initActualTypeData() +} + +// Decoder calls LinkTypeMetadata.DecodeWith's decoder. +func (a LinkType) Decode(data []byte, p gopacket.PacketBuilder) error { + return LinkTypeMetadata[a].DecodeWith.Decode(data, p) +} + +// String returns LinkTypeMetadata.Name. +func (a LinkType) String() string { + return LinkTypeMetadata[a].Name +} + +// LayerType returns LinkTypeMetadata.LayerType. 
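Because the metadata tables are exported, an application can override or extend an entry, as the comment in initActualTypeData suggests. A hedged sketch, assuming the application wants IP protocol 253 (reserved for experimentation) decoded as a plain payload; the protocol number and the "Experiment253" name are arbitrary choices:

```go
package main

import (
	"fmt"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	// By default an unassigned protocol number maps to an error decoder
	// named "UnknownIPProtocol"; claim it before decoding any packets.
	layers.IPProtocolMetadata[layers.IPProtocol(253)] = layers.EnumMetadata{
		DecodeWith: gopacket.DecodePayload,
		Name:       "Experiment253",
		LayerType:  gopacket.LayerTypePayload,
	}
	fmt.Println(layers.IPProtocol(253)) // Experiment253
}
```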
+func (a LinkType) LayerType() gopacket.LayerType { + return LinkTypeMetadata[a].LayerType +} + +type errorDecoderForLinkType int + +func (a *errorDecoderForLinkType) Decode(data []byte, p gopacket.PacketBuilder) error { + return a +} +func (a *errorDecoderForLinkType) Error() string { + return fmt.Sprintf("Unable to decode LinkType %d", int(*a)) +} + +var errorDecodersForLinkType [256]errorDecoderForLinkType +var LinkTypeMetadata [256]EnumMetadata + +func initUnknownTypesForLinkType() { + for i := 0; i < 256; i++ { + errorDecodersForLinkType[i] = errorDecoderForLinkType(i) + LinkTypeMetadata[i] = EnumMetadata{ + DecodeWith: &errorDecodersForLinkType[i], + Name: "UnknownLinkType", + } + } +} + +// Decoder calls EthernetTypeMetadata.DecodeWith's decoder. +func (a EthernetType) Decode(data []byte, p gopacket.PacketBuilder) error { + return EthernetTypeMetadata[a].DecodeWith.Decode(data, p) +} + +// String returns EthernetTypeMetadata.Name. +func (a EthernetType) String() string { + return EthernetTypeMetadata[a].Name +} + +// LayerType returns EthernetTypeMetadata.LayerType. +func (a EthernetType) LayerType() gopacket.LayerType { + return EthernetTypeMetadata[a].LayerType +} + +type errorDecoderForEthernetType int + +func (a *errorDecoderForEthernetType) Decode(data []byte, p gopacket.PacketBuilder) error { + return a +} +func (a *errorDecoderForEthernetType) Error() string { + return fmt.Sprintf("Unable to decode EthernetType %d", int(*a)) +} + +var errorDecodersForEthernetType [65536]errorDecoderForEthernetType +var EthernetTypeMetadata [65536]EnumMetadata + +func initUnknownTypesForEthernetType() { + for i := 0; i < 65536; i++ { + errorDecodersForEthernetType[i] = errorDecoderForEthernetType(i) + EthernetTypeMetadata[i] = EnumMetadata{ + DecodeWith: &errorDecodersForEthernetType[i], + Name: "UnknownEthernetType", + } + } +} + +// Decoder calls PPPTypeMetadata.DecodeWith's decoder. +func (a PPPType) Decode(data []byte, p gopacket.PacketBuilder) error { + return PPPTypeMetadata[a].DecodeWith.Decode(data, p) +} + +// String returns PPPTypeMetadata.Name. +func (a PPPType) String() string { + return PPPTypeMetadata[a].Name +} + +// LayerType returns PPPTypeMetadata.LayerType. +func (a PPPType) LayerType() gopacket.LayerType { + return PPPTypeMetadata[a].LayerType +} + +type errorDecoderForPPPType int + +func (a *errorDecoderForPPPType) Decode(data []byte, p gopacket.PacketBuilder) error { + return a +} +func (a *errorDecoderForPPPType) Error() string { + return fmt.Sprintf("Unable to decode PPPType %d", int(*a)) +} + +var errorDecodersForPPPType [65536]errorDecoderForPPPType +var PPPTypeMetadata [65536]EnumMetadata + +func initUnknownTypesForPPPType() { + for i := 0; i < 65536; i++ { + errorDecodersForPPPType[i] = errorDecoderForPPPType(i) + PPPTypeMetadata[i] = EnumMetadata{ + DecodeWith: &errorDecodersForPPPType[i], + Name: "UnknownPPPType", + } + } +} + +// Decoder calls IPProtocolMetadata.DecodeWith's decoder. +func (a IPProtocol) Decode(data []byte, p gopacket.PacketBuilder) error { + return IPProtocolMetadata[a].DecodeWith.Decode(data, p) +} + +// String returns IPProtocolMetadata.Name. +func (a IPProtocol) String() string { + return IPProtocolMetadata[a].Name +} + +// LayerType returns IPProtocolMetadata.LayerType. 
+func (a IPProtocol) LayerType() gopacket.LayerType { + return IPProtocolMetadata[a].LayerType +} + +type errorDecoderForIPProtocol int + +func (a *errorDecoderForIPProtocol) Decode(data []byte, p gopacket.PacketBuilder) error { + return a +} +func (a *errorDecoderForIPProtocol) Error() string { + return fmt.Sprintf("Unable to decode IPProtocol %d", int(*a)) +} + +var errorDecodersForIPProtocol [256]errorDecoderForIPProtocol +var IPProtocolMetadata [256]EnumMetadata + +func initUnknownTypesForIPProtocol() { + for i := 0; i < 256; i++ { + errorDecodersForIPProtocol[i] = errorDecoderForIPProtocol(i) + IPProtocolMetadata[i] = EnumMetadata{ + DecodeWith: &errorDecodersForIPProtocol[i], + Name: "UnknownIPProtocol", + } + } +} + +// Decoder calls SCTPChunkTypeMetadata.DecodeWith's decoder. +func (a SCTPChunkType) Decode(data []byte, p gopacket.PacketBuilder) error { + return SCTPChunkTypeMetadata[a].DecodeWith.Decode(data, p) +} + +// String returns SCTPChunkTypeMetadata.Name. +func (a SCTPChunkType) String() string { + return SCTPChunkTypeMetadata[a].Name +} + +// LayerType returns SCTPChunkTypeMetadata.LayerType. +func (a SCTPChunkType) LayerType() gopacket.LayerType { + return SCTPChunkTypeMetadata[a].LayerType +} + +type errorDecoderForSCTPChunkType int + +func (a *errorDecoderForSCTPChunkType) Decode(data []byte, p gopacket.PacketBuilder) error { + return a +} +func (a *errorDecoderForSCTPChunkType) Error() string { + return fmt.Sprintf("Unable to decode SCTPChunkType %d", int(*a)) +} + +var errorDecodersForSCTPChunkType [256]errorDecoderForSCTPChunkType +var SCTPChunkTypeMetadata [256]EnumMetadata + +func initUnknownTypesForSCTPChunkType() { + for i := 0; i < 256; i++ { + errorDecodersForSCTPChunkType[i] = errorDecoderForSCTPChunkType(i) + SCTPChunkTypeMetadata[i] = EnumMetadata{ + DecodeWith: &errorDecodersForSCTPChunkType[i], + Name: "UnknownSCTPChunkType", + } + } +} + +// Decoder calls PPPoECodeMetadata.DecodeWith's decoder. +func (a PPPoECode) Decode(data []byte, p gopacket.PacketBuilder) error { + return PPPoECodeMetadata[a].DecodeWith.Decode(data, p) +} + +// String returns PPPoECodeMetadata.Name. +func (a PPPoECode) String() string { + return PPPoECodeMetadata[a].Name +} + +// LayerType returns PPPoECodeMetadata.LayerType. +func (a PPPoECode) LayerType() gopacket.LayerType { + return PPPoECodeMetadata[a].LayerType +} + +type errorDecoderForPPPoECode int + +func (a *errorDecoderForPPPoECode) Decode(data []byte, p gopacket.PacketBuilder) error { + return a +} +func (a *errorDecoderForPPPoECode) Error() string { + return fmt.Sprintf("Unable to decode PPPoECode %d", int(*a)) +} + +var errorDecodersForPPPoECode [256]errorDecoderForPPPoECode +var PPPoECodeMetadata [256]EnumMetadata + +func initUnknownTypesForPPPoECode() { + for i := 0; i < 256; i++ { + errorDecodersForPPPoECode[i] = errorDecoderForPPPoECode(i) + PPPoECodeMetadata[i] = EnumMetadata{ + DecodeWith: &errorDecodersForPPPoECode[i], + Name: "UnknownPPPoECode", + } + } +} + +// Decoder calls FDDIFrameControlMetadata.DecodeWith's decoder. +func (a FDDIFrameControl) Decode(data []byte, p gopacket.PacketBuilder) error { + return FDDIFrameControlMetadata[a].DecodeWith.Decode(data, p) +} + +// String returns FDDIFrameControlMetadata.Name. +func (a FDDIFrameControl) String() string { + return FDDIFrameControlMetadata[a].Name +} + +// LayerType returns FDDIFrameControlMetadata.LayerType. 
+func (a FDDIFrameControl) LayerType() gopacket.LayerType { + return FDDIFrameControlMetadata[a].LayerType +} + +type errorDecoderForFDDIFrameControl int + +func (a *errorDecoderForFDDIFrameControl) Decode(data []byte, p gopacket.PacketBuilder) error { + return a +} +func (a *errorDecoderForFDDIFrameControl) Error() string { + return fmt.Sprintf("Unable to decode FDDIFrameControl %d", int(*a)) +} + +var errorDecodersForFDDIFrameControl [256]errorDecoderForFDDIFrameControl +var FDDIFrameControlMetadata [256]EnumMetadata + +func initUnknownTypesForFDDIFrameControl() { + for i := 0; i < 256; i++ { + errorDecodersForFDDIFrameControl[i] = errorDecoderForFDDIFrameControl(i) + FDDIFrameControlMetadata[i] = EnumMetadata{ + DecodeWith: &errorDecodersForFDDIFrameControl[i], + Name: "UnknownFDDIFrameControl", + } + } +} + +// Decoder calls EAPOLTypeMetadata.DecodeWith's decoder. +func (a EAPOLType) Decode(data []byte, p gopacket.PacketBuilder) error { + return EAPOLTypeMetadata[a].DecodeWith.Decode(data, p) +} + +// String returns EAPOLTypeMetadata.Name. +func (a EAPOLType) String() string { + return EAPOLTypeMetadata[a].Name +} + +// LayerType returns EAPOLTypeMetadata.LayerType. +func (a EAPOLType) LayerType() gopacket.LayerType { + return EAPOLTypeMetadata[a].LayerType +} + +type errorDecoderForEAPOLType int + +func (a *errorDecoderForEAPOLType) Decode(data []byte, p gopacket.PacketBuilder) error { + return a +} +func (a *errorDecoderForEAPOLType) Error() string { + return fmt.Sprintf("Unable to decode EAPOLType %d", int(*a)) +} + +var errorDecodersForEAPOLType [256]errorDecoderForEAPOLType +var EAPOLTypeMetadata [256]EnumMetadata + +func initUnknownTypesForEAPOLType() { + for i := 0; i < 256; i++ { + errorDecodersForEAPOLType[i] = errorDecoderForEAPOLType(i) + EAPOLTypeMetadata[i] = EnumMetadata{ + DecodeWith: &errorDecodersForEAPOLType[i], + Name: "UnknownEAPOLType", + } + } +} + +// Decoder calls ProtocolFamilyMetadata.DecodeWith's decoder. +func (a ProtocolFamily) Decode(data []byte, p gopacket.PacketBuilder) error { + return ProtocolFamilyMetadata[a].DecodeWith.Decode(data, p) +} + +// String returns ProtocolFamilyMetadata.Name. +func (a ProtocolFamily) String() string { + return ProtocolFamilyMetadata[a].Name +} + +// LayerType returns ProtocolFamilyMetadata.LayerType. +func (a ProtocolFamily) LayerType() gopacket.LayerType { + return ProtocolFamilyMetadata[a].LayerType +} + +type errorDecoderForProtocolFamily int + +func (a *errorDecoderForProtocolFamily) Decode(data []byte, p gopacket.PacketBuilder) error { + return a +} +func (a *errorDecoderForProtocolFamily) Error() string { + return fmt.Sprintf("Unable to decode ProtocolFamily %d", int(*a)) +} + +var errorDecodersForProtocolFamily [256]errorDecoderForProtocolFamily +var ProtocolFamilyMetadata [256]EnumMetadata + +func initUnknownTypesForProtocolFamily() { + for i := 0; i < 256; i++ { + errorDecodersForProtocolFamily[i] = errorDecoderForProtocolFamily(i) + ProtocolFamilyMetadata[i] = EnumMetadata{ + DecodeWith: &errorDecodersForProtocolFamily[i], + Name: "UnknownProtocolFamily", + } + } +} + +// Decoder calls Dot11TypeMetadata.DecodeWith's decoder. +func (a Dot11Type) Decode(data []byte, p gopacket.PacketBuilder) error { + return Dot11TypeMetadata[a].DecodeWith.Decode(data, p) +} + +// String returns Dot11TypeMetadata.Name. +func (a Dot11Type) String() string { + return Dot11TypeMetadata[a].Name +} + +// LayerType returns Dot11TypeMetadata.LayerType. 
+func (a Dot11Type) LayerType() gopacket.LayerType { + return Dot11TypeMetadata[a].LayerType +} + +type errorDecoderForDot11Type int + +func (a *errorDecoderForDot11Type) Decode(data []byte, p gopacket.PacketBuilder) error { + return a +} +func (a *errorDecoderForDot11Type) Error() string { + return fmt.Sprintf("Unable to decode Dot11Type %d", int(*a)) +} + +var errorDecodersForDot11Type [256]errorDecoderForDot11Type +var Dot11TypeMetadata [256]EnumMetadata + +func initUnknownTypesForDot11Type() { + for i := 0; i < 256; i++ { + errorDecodersForDot11Type[i] = errorDecoderForDot11Type(i) + Dot11TypeMetadata[i] = EnumMetadata{ + DecodeWith: &errorDecodersForDot11Type[i], + Name: "UnknownDot11Type", + } + } +} + +// Decoder calls USBTransportTypeMetadata.DecodeWith's decoder. +func (a USBTransportType) Decode(data []byte, p gopacket.PacketBuilder) error { + return USBTransportTypeMetadata[a].DecodeWith.Decode(data, p) +} + +// String returns USBTransportTypeMetadata.Name. +func (a USBTransportType) String() string { + return USBTransportTypeMetadata[a].Name +} + +// LayerType returns USBTransportTypeMetadata.LayerType. +func (a USBTransportType) LayerType() gopacket.LayerType { + return USBTransportTypeMetadata[a].LayerType +} + +type errorDecoderForUSBTransportType int + +func (a *errorDecoderForUSBTransportType) Decode(data []byte, p gopacket.PacketBuilder) error { + return a +} +func (a *errorDecoderForUSBTransportType) Error() string { + return fmt.Sprintf("Unable to decode USBTransportType %d", int(*a)) +} + +var errorDecodersForUSBTransportType [256]errorDecoderForUSBTransportType +var USBTransportTypeMetadata [256]EnumMetadata + +func initUnknownTypesForUSBTransportType() { + for i := 0; i < 256; i++ { + errorDecodersForUSBTransportType[i] = errorDecoderForUSBTransportType(i) + USBTransportTypeMetadata[i] = EnumMetadata{ + DecodeWith: &errorDecodersForUSBTransportType[i], + Name: "UnknownUSBTransportType", + } + } +} diff --git a/vendor/github.com/google/gopacket/layers/erspan2.go b/vendor/github.com/google/gopacket/layers/erspan2.go new file mode 100644 index 0000000000..154436205e --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/erspan2.go @@ -0,0 +1,86 @@ +// Copyright 2018 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + + "github.com/google/gopacket" +) + +const ( + //ERSPANIIVersionObsolete - The obsolete value for the version field + ERSPANIIVersionObsolete = 0x0 + // ERSPANIIVersion - The current value for the version field + ERSPANIIVersion = 0x1 +) + +// ERSPANII contains all of the fields found in an ERSPAN Type II header +// https://tools.ietf.org/html/draft-foschiano-erspan-03 +type ERSPANII struct { + BaseLayer + IsTruncated bool + Version, CoS, TrunkEncap uint8 + VLANIdentifier, SessionID, Reserved uint16 + Index uint32 +} + +func (erspan2 *ERSPANII) LayerType() gopacket.LayerType { return LayerTypeERSPANII } + +// DecodeFromBytes decodes the given bytes into this layer. 
+func (erspan2 *ERSPANII) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + erspan2Length := 8 + erspan2.Version = data[0] & 0xF0 >> 4 + erspan2.VLANIdentifier = binary.BigEndian.Uint16(data[:2]) & 0x0FFF + erspan2.CoS = data[2] & 0xE0 >> 5 + erspan2.TrunkEncap = data[2] & 0x18 >> 3 + erspan2.IsTruncated = data[2]&0x4>>2 != 0 + erspan2.SessionID = binary.BigEndian.Uint16(data[2:4]) & 0x03FF + erspan2.Reserved = binary.BigEndian.Uint16(data[4:6]) & 0xFFF0 >> 4 + erspan2.Index = binary.BigEndian.Uint32(data[4:8]) & 0x000FFFFF + erspan2.Contents = data[:erspan2Length] + erspan2.Payload = data[erspan2Length:] + return nil +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (erspan2 *ERSPANII) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + bytes, err := b.PrependBytes(8) + if err != nil { + return err + } + + twoByteInt := uint16(erspan2.Version&0xF)<<12 | erspan2.VLANIdentifier&0x0FFF + binary.BigEndian.PutUint16(bytes, twoByteInt) + + twoByteInt = uint16(erspan2.CoS&0x7)<<13 | uint16(erspan2.TrunkEncap&0x3)<<11 | erspan2.SessionID&0x03FF + if erspan2.IsTruncated { + twoByteInt |= 0x400 + } + binary.BigEndian.PutUint16(bytes[2:], twoByteInt) + + fourByteInt := uint32(erspan2.Reserved&0x0FFF)<<20 | erspan2.Index&0x000FFFFF + binary.BigEndian.PutUint32(bytes[4:], fourByteInt) + return nil +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (erspan2 *ERSPANII) CanDecode() gopacket.LayerClass { + return LayerTypeERSPANII +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (erspan2 *ERSPANII) NextLayerType() gopacket.LayerType { + return LayerTypeEthernet +} + +func decodeERSPANII(data []byte, p gopacket.PacketBuilder) error { + erspan2 := &ERSPANII{} + return decodingLayerDecoder(erspan2, data, p) +} diff --git a/vendor/github.com/google/gopacket/layers/etherip.go b/vendor/github.com/google/gopacket/layers/etherip.go new file mode 100644 index 0000000000..5b7b7229ec --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/etherip.go @@ -0,0 +1,45 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "github.com/google/gopacket" +) + +// EtherIP is the struct for storing RFC 3378 EtherIP packet headers. +type EtherIP struct { + BaseLayer + Version uint8 + Reserved uint16 +} + +// LayerType returns gopacket.LayerTypeEtherIP. +func (e *EtherIP) LayerType() gopacket.LayerType { return LayerTypeEtherIP } + +// DecodeFromBytes decodes the given bytes into this layer. +func (e *EtherIP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + e.Version = data[0] >> 4 + e.Reserved = binary.BigEndian.Uint16(data[:2]) & 0x0fff + e.BaseLayer = BaseLayer{data[:2], data[2:]} + return nil +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (e *EtherIP) CanDecode() gopacket.LayerClass { + return LayerTypeEtherIP +} + +// NextLayerType returns the layer type contained by this DecodingLayer. 
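A short round-trip sketch for the ERSPAN Type II header handled above, using arbitrary session values, showing that the packed fields survive SerializeTo followed by DecodeFromBytes:

```go
package main

import (
	"fmt"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	er := &layers.ERSPANII{
		Version:        layers.ERSPANIIVersion,
		VLANIdentifier: 100,
		SessionID:      5,
		Index:          42,
	}
	buf := gopacket.NewSerializeBuffer()
	if err := er.SerializeTo(buf, gopacket.SerializeOptions{}); err != nil {
		panic(err)
	}

	var got layers.ERSPANII
	if err := got.DecodeFromBytes(buf.Bytes(), gopacket.NilDecodeFeedback); err != nil {
		panic(err)
	}
	fmt.Println(got.Version, got.VLANIdentifier, got.SessionID, got.Index) // 1 100 5 42
}
```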
+func (e *EtherIP) NextLayerType() gopacket.LayerType { + return LayerTypeEthernet +} + +func decodeEtherIP(data []byte, p gopacket.PacketBuilder) error { + e := &EtherIP{} + return decodingLayerDecoder(e, data, p) +} diff --git a/vendor/github.com/google/gopacket/layers/ethernet.go b/vendor/github.com/google/gopacket/layers/ethernet.go new file mode 100644 index 0000000000..b73748f2f7 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/ethernet.go @@ -0,0 +1,123 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// Copyright 2009-2011 Andreas Krennmair. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "errors" + "fmt" + "github.com/google/gopacket" + "net" +) + +// EthernetBroadcast is the broadcast MAC address used by Ethernet. +var EthernetBroadcast = net.HardwareAddr{0xff, 0xff, 0xff, 0xff, 0xff, 0xff} + +// Ethernet is the layer for Ethernet frame headers. +type Ethernet struct { + BaseLayer + SrcMAC, DstMAC net.HardwareAddr + EthernetType EthernetType + // Length is only set if a length field exists within this header. Ethernet + // headers follow two different standards, one that uses an EthernetType, the + // other which defines a length the follows with a LLC header (802.3). If the + // former is the case, we set EthernetType and Length stays 0. In the latter + // case, we set Length and EthernetType = EthernetTypeLLC. + Length uint16 +} + +// LayerType returns LayerTypeEthernet +func (e *Ethernet) LayerType() gopacket.LayerType { return LayerTypeEthernet } + +func (e *Ethernet) LinkFlow() gopacket.Flow { + return gopacket.NewFlow(EndpointMAC, e.SrcMAC, e.DstMAC) +} + +func (eth *Ethernet) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 14 { + return errors.New("Ethernet packet too small") + } + eth.DstMAC = net.HardwareAddr(data[0:6]) + eth.SrcMAC = net.HardwareAddr(data[6:12]) + eth.EthernetType = EthernetType(binary.BigEndian.Uint16(data[12:14])) + eth.BaseLayer = BaseLayer{data[:14], data[14:]} + eth.Length = 0 + if eth.EthernetType < 0x0600 { + eth.Length = uint16(eth.EthernetType) + eth.EthernetType = EthernetTypeLLC + if cmp := len(eth.Payload) - int(eth.Length); cmp < 0 { + df.SetTruncated() + } else if cmp > 0 { + // Strip off bytes at the end, since we have too many bytes + eth.Payload = eth.Payload[:len(eth.Payload)-cmp] + } + // fmt.Println(eth) + } + return nil +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. 
+func (eth *Ethernet) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + if len(eth.DstMAC) != 6 { + return fmt.Errorf("invalid dst MAC: %v", eth.DstMAC) + } + if len(eth.SrcMAC) != 6 { + return fmt.Errorf("invalid src MAC: %v", eth.SrcMAC) + } + payload := b.Bytes() + bytes, err := b.PrependBytes(14) + if err != nil { + return err + } + copy(bytes, eth.DstMAC) + copy(bytes[6:], eth.SrcMAC) + if eth.Length != 0 || eth.EthernetType == EthernetTypeLLC { + if opts.FixLengths { + eth.Length = uint16(len(payload)) + } + if eth.EthernetType != EthernetTypeLLC { + return fmt.Errorf("ethernet type %v not compatible with length value %v", eth.EthernetType, eth.Length) + } else if eth.Length > 0x0600 { + return fmt.Errorf("invalid ethernet length %v", eth.Length) + } + binary.BigEndian.PutUint16(bytes[12:], eth.Length) + } else { + binary.BigEndian.PutUint16(bytes[12:], uint16(eth.EthernetType)) + } + length := len(b.Bytes()) + if length < 60 { + // Pad out to 60 bytes. + padding, err := b.AppendBytes(60 - length) + if err != nil { + return err + } + copy(padding, lotsOfZeros[:]) + } + return nil +} + +func (eth *Ethernet) CanDecode() gopacket.LayerClass { + return LayerTypeEthernet +} + +func (eth *Ethernet) NextLayerType() gopacket.LayerType { + return eth.EthernetType.LayerType() +} + +func decodeEthernet(data []byte, p gopacket.PacketBuilder) error { + eth := &Ethernet{} + err := eth.DecodeFromBytes(data, p) + if err != nil { + return err + } + p.AddLayer(eth) + p.SetLinkLayer(eth) + return p.NextDecoder(eth.EthernetType) +} diff --git a/vendor/github.com/google/gopacket/layers/fddi.go b/vendor/github.com/google/gopacket/layers/fddi.go new file mode 100644 index 0000000000..ed9e1957b9 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/fddi.go @@ -0,0 +1,41 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "github.com/google/gopacket" + "net" +) + +// FDDI contains the header for FDDI frames. +type FDDI struct { + BaseLayer + FrameControl FDDIFrameControl + Priority uint8 + SrcMAC, DstMAC net.HardwareAddr +} + +// LayerType returns LayerTypeFDDI. +func (f *FDDI) LayerType() gopacket.LayerType { return LayerTypeFDDI } + +// LinkFlow returns a new flow of type EndpointMAC. +func (f *FDDI) LinkFlow() gopacket.Flow { + return gopacket.NewFlow(EndpointMAC, f.SrcMAC, f.DstMAC) +} + +func decodeFDDI(data []byte, p gopacket.PacketBuilder) error { + f := &FDDI{ + FrameControl: FDDIFrameControl(data[0] & 0xF8), + Priority: data[0] & 0x07, + SrcMAC: net.HardwareAddr(data[1:7]), + DstMAC: net.HardwareAddr(data[7:13]), + BaseLayer: BaseLayer{data[:13], data[13:]}, + } + p.SetLinkLayer(f) + p.AddLayer(f) + return p.NextDecoder(f.FrameControl) +} diff --git a/vendor/github.com/google/gopacket/layers/fuzz_layer.go b/vendor/github.com/google/gopacket/layers/fuzz_layer.go new file mode 100644 index 0000000000..606e45d24c --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/fuzz_layer.go @@ -0,0 +1,39 @@ +// Copyright 2019 The GoPacket Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file in the root of the source tree. 
+ +package layers + +import ( + "encoding/binary" + + "github.com/google/gopacket" +) + +// FuzzLayer is a fuzz target for the layers package of gopacket +// A fuzz target is a function processing a binary blob (byte slice) +// The process here is to interpret this data as a packet, and print the layers contents. +// The decoding options and the starting layer are encoded in the first bytes. +// The function returns 1 if this is a valid packet (no error layer) +func FuzzLayer(data []byte) int { + if len(data) < 3 { + return 0 + } + // use the first two bytes to choose the top level layer + startLayer := binary.BigEndian.Uint16(data[:2]) + var fuzzOpts = gopacket.DecodeOptions{ + Lazy: data[2]&0x1 != 0, + NoCopy: data[2]&0x2 != 0, + SkipDecodeRecovery: data[2]&0x4 != 0, + DecodeStreamsAsDatagrams: data[2]&0x8 != 0, + } + p := gopacket.NewPacket(data[3:], gopacket.LayerType(startLayer), fuzzOpts) + for _, l := range p.Layers() { + gopacket.LayerString(l) + } + if p.ErrorLayer() != nil { + return 0 + } + return 1 +} diff --git a/vendor/github.com/google/gopacket/layers/gen_linted.sh b/vendor/github.com/google/gopacket/layers/gen_linted.sh new file mode 100644 index 0000000000..75c701f4d5 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/gen_linted.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +for i in *.go; do golint $i | grep -q . || echo $i; done > .linted diff --git a/vendor/github.com/google/gopacket/layers/geneve.go b/vendor/github.com/google/gopacket/layers/geneve.go new file mode 100644 index 0000000000..e9a1428809 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/geneve.go @@ -0,0 +1,121 @@ +// Copyright 2016 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "errors" + + "github.com/google/gopacket" +) + +// Geneve is specifed here https://tools.ietf.org/html/draft-ietf-nvo3-geneve-03 +// Geneve Header: +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// |Ver| Opt Len |O|C| Rsvd. 
| Protocol Type | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Virtual Network Identifier (VNI) | Reserved | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Variable Length Options | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +type Geneve struct { + BaseLayer + Version uint8 // 2 bits + OptionsLength uint8 // 6 bits + OAMPacket bool // 1 bits + CriticalOption bool // 1 bits + Protocol EthernetType // 16 bits + VNI uint32 // 24bits + Options []*GeneveOption +} + +// Geneve Tunnel Options +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Option Class | Type |R|R|R| Length | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Variable Option Data | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +type GeneveOption struct { + Class uint16 // 16 bits + Type uint8 // 8 bits + Flags uint8 // 3 bits + Length uint8 // 5 bits + Data []byte +} + +// LayerType returns LayerTypeGeneve +func (gn *Geneve) LayerType() gopacket.LayerType { return LayerTypeGeneve } + +func decodeGeneveOption(data []byte, gn *Geneve, df gopacket.DecodeFeedback) (*GeneveOption, uint8, error) { + if len(data) < 3 { + df.SetTruncated() + return nil, 0, errors.New("geneve option too small") + } + opt := &GeneveOption{} + + opt.Class = binary.BigEndian.Uint16(data[0:2]) + opt.Type = data[2] + opt.Flags = data[3] >> 4 + opt.Length = (data[3]&0xf)*4 + 4 + + if len(data) < int(opt.Length) { + df.SetTruncated() + return nil, 0, errors.New("geneve option too small") + } + opt.Data = make([]byte, opt.Length-4) + copy(opt.Data, data[4:opt.Length]) + + return opt, opt.Length, nil +} + +func (gn *Geneve) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 7 { + df.SetTruncated() + return errors.New("geneve packet too short") + } + + gn.Version = data[0] >> 7 + gn.OptionsLength = (data[0] & 0x3f) * 4 + + gn.OAMPacket = data[1]&0x80 > 0 + gn.CriticalOption = data[1]&0x40 > 0 + gn.Protocol = EthernetType(binary.BigEndian.Uint16(data[2:4])) + + var buf [4]byte + copy(buf[1:], data[4:7]) + gn.VNI = binary.BigEndian.Uint32(buf[:]) + + offset, length := uint8(8), int32(gn.OptionsLength) + if len(data) < int(length+7) { + df.SetTruncated() + return errors.New("geneve packet too short") + } + + for length > 0 { + opt, len, err := decodeGeneveOption(data[offset:], gn, df) + if err != nil { + return err + } + gn.Options = append(gn.Options, opt) + + length -= int32(len) + offset += len + } + + gn.BaseLayer = BaseLayer{data[:offset], data[offset:]} + + return nil +} + +func (gn *Geneve) NextLayerType() gopacket.LayerType { + return gn.Protocol.LayerType() +} + +func decodeGeneve(data []byte, p gopacket.PacketBuilder) error { + gn := &Geneve{} + return decodingLayerDecoder(gn, data, p) +} diff --git a/vendor/github.com/google/gopacket/layers/gre.go b/vendor/github.com/google/gopacket/layers/gre.go new file mode 100644 index 0000000000..9c5e7d246f --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/gre.go @@ -0,0 +1,200 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + + "github.com/google/gopacket" +) + +// GRE is a Generic Routing Encapsulation header. 
+type GRE struct { + BaseLayer + ChecksumPresent, RoutingPresent, KeyPresent, SeqPresent, StrictSourceRoute, AckPresent bool + RecursionControl, Flags, Version uint8 + Protocol EthernetType + Checksum, Offset uint16 + Key, Seq, Ack uint32 + *GRERouting +} + +// GRERouting is GRE routing information, present if the RoutingPresent flag is +// set. +type GRERouting struct { + AddressFamily uint16 + SREOffset, SRELength uint8 + RoutingInformation []byte + Next *GRERouting +} + +// LayerType returns gopacket.LayerTypeGRE. +func (g *GRE) LayerType() gopacket.LayerType { return LayerTypeGRE } + +// DecodeFromBytes decodes the given bytes into this layer. +func (g *GRE) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + g.ChecksumPresent = data[0]&0x80 != 0 + g.RoutingPresent = data[0]&0x40 != 0 + g.KeyPresent = data[0]&0x20 != 0 + g.SeqPresent = data[0]&0x10 != 0 + g.StrictSourceRoute = data[0]&0x08 != 0 + g.AckPresent = data[1]&0x80 != 0 + g.RecursionControl = data[0] & 0x7 + g.Flags = data[1] >> 3 + g.Version = data[1] & 0x7 + g.Protocol = EthernetType(binary.BigEndian.Uint16(data[2:4])) + offset := 4 + if g.ChecksumPresent || g.RoutingPresent { + g.Checksum = binary.BigEndian.Uint16(data[offset : offset+2]) + g.Offset = binary.BigEndian.Uint16(data[offset+2 : offset+4]) + offset += 4 + } + if g.KeyPresent { + g.Key = binary.BigEndian.Uint32(data[offset : offset+4]) + offset += 4 + } + if g.SeqPresent { + g.Seq = binary.BigEndian.Uint32(data[offset : offset+4]) + offset += 4 + } + if g.RoutingPresent { + tail := &g.GRERouting + for { + sre := &GRERouting{ + AddressFamily: binary.BigEndian.Uint16(data[offset : offset+2]), + SREOffset: data[offset+2], + SRELength: data[offset+3], + } + sre.RoutingInformation = data[offset+4 : offset+4+int(sre.SRELength)] + offset += 4 + int(sre.SRELength) + if sre.AddressFamily == 0 && sre.SRELength == 0 { + break + } + (*tail) = sre + tail = &sre.Next + } + } + if g.AckPresent { + g.Ack = binary.BigEndian.Uint32(data[offset : offset+4]) + offset += 4 + } + g.BaseLayer = BaseLayer{data[:offset], data[offset:]} + return nil +} + +// SerializeTo writes the serialized form of this layer into the SerializationBuffer, +// implementing gopacket.SerializableLayer. See the docs for gopacket.SerializableLayer for more info. +func (g *GRE) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + size := 4 + if g.ChecksumPresent || g.RoutingPresent { + size += 4 + } + if g.KeyPresent { + size += 4 + } + if g.SeqPresent { + size += 4 + } + if g.RoutingPresent { + r := g.GRERouting + for r != nil { + size += 4 + int(r.SRELength) + r = r.Next + } + size += 4 + } + if g.AckPresent { + size += 4 + } + buf, err := b.PrependBytes(size) + if err != nil { + return err + } + // Reset any potentially dirty memory in the first 2 bytes, as these use OR to set flags. + buf[0] = 0 + buf[1] = 0 + if g.ChecksumPresent { + buf[0] |= 0x80 + } + if g.RoutingPresent { + buf[0] |= 0x40 + } + if g.KeyPresent { + buf[0] |= 0x20 + } + if g.SeqPresent { + buf[0] |= 0x10 + } + if g.StrictSourceRoute { + buf[0] |= 0x08 + } + if g.AckPresent { + buf[1] |= 0x80 + } + buf[0] |= g.RecursionControl + buf[1] |= g.Flags << 3 + buf[1] |= g.Version + binary.BigEndian.PutUint16(buf[2:4], uint16(g.Protocol)) + offset := 4 + if g.ChecksumPresent || g.RoutingPresent { + // Don't write the checksum value yet, as we may need to compute it, + // which requires the entire header be complete. + // Instead we zeroize the memory in case it is dirty. 
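+ // The checksum value itself is written at the very end of SerializeTo: when opts.ComputeChecksums is set it is recomputed over the serialized buffer and then stored in buf[4:6].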
+ buf[offset] = 0 + buf[offset+1] = 0 + binary.BigEndian.PutUint16(buf[offset+2:offset+4], g.Offset) + offset += 4 + } + if g.KeyPresent { + binary.BigEndian.PutUint32(buf[offset:offset+4], g.Key) + offset += 4 + } + if g.SeqPresent { + binary.BigEndian.PutUint32(buf[offset:offset+4], g.Seq) + offset += 4 + } + if g.RoutingPresent { + sre := g.GRERouting + for sre != nil { + binary.BigEndian.PutUint16(buf[offset:offset+2], sre.AddressFamily) + buf[offset+2] = sre.SREOffset + buf[offset+3] = sre.SRELength + copy(buf[offset+4:offset+4+int(sre.SRELength)], sre.RoutingInformation) + offset += 4 + int(sre.SRELength) + sre = sre.Next + } + // Terminate routing field with a "NULL" SRE. + binary.BigEndian.PutUint32(buf[offset:offset+4], 0) + } + if g.AckPresent { + binary.BigEndian.PutUint32(buf[offset:offset+4], g.Ack) + offset += 4 + } + if g.ChecksumPresent { + if opts.ComputeChecksums { + g.Checksum = tcpipChecksum(b.Bytes(), 0) + } + + binary.BigEndian.PutUint16(buf[4:6], g.Checksum) + } + return nil +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (g *GRE) CanDecode() gopacket.LayerClass { + return LayerTypeGRE +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (g *GRE) NextLayerType() gopacket.LayerType { + return g.Protocol.LayerType() +} + +func decodeGRE(data []byte, p gopacket.PacketBuilder) error { + g := &GRE{} + return decodingLayerDecoder(g, data, p) +} diff --git a/vendor/github.com/google/gopacket/layers/gtp.go b/vendor/github.com/google/gopacket/layers/gtp.go new file mode 100644 index 0000000000..fe3054a6d0 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/gtp.go @@ -0,0 +1,184 @@ +// Copyright 2017 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. +// + +package layers + +import ( + "encoding/binary" + "fmt" + "github.com/google/gopacket" +) + +const gtpMinimumSizeInBytes int = 8 + +// GTPExtensionHeader is used to carry extra data and enable future extensions of the GTP without the need to use another version number. +type GTPExtensionHeader struct { + Type uint8 + Content []byte +} + +// GTPv1U protocol is used to exchange user data over GTP tunnels across the Sx interfaces. 
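+// The mandatory header is 8 bytes (version, flags, message type, length and TEID); the sequence number, N-PDU number and extension headers are only present when the corresponding flags are set.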
+// Defined in https://portal.3gpp.org/desktopmodules/Specifications/SpecificationDetails.aspx?specificationId=1595 +type GTPv1U struct { + BaseLayer + Version uint8 + ProtocolType uint8 + Reserved uint8 + ExtensionHeaderFlag bool + SequenceNumberFlag bool + NPDUFlag bool + MessageType uint8 + MessageLength uint16 + TEID uint32 + SequenceNumber uint16 + NPDU uint8 + GTPExtensionHeaders []GTPExtensionHeader +} + +// LayerType returns LayerTypeGTPV1U +func (g *GTPv1U) LayerType() gopacket.LayerType { return LayerTypeGTPv1U } + +// DecodeFromBytes analyses a byte slice and attempts to decode it as a GTPv1U packet +func (g *GTPv1U) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + hLen := gtpMinimumSizeInBytes + dLen := len(data) + if dLen < hLen { + return fmt.Errorf("GTP packet too small: %d bytes", dLen) + } + g.Version = (data[0] >> 5) & 0x07 + g.ProtocolType = (data[0] >> 4) & 0x01 + g.Reserved = (data[0] >> 3) & 0x01 + g.SequenceNumberFlag = ((data[0] >> 1) & 0x01) == 1 + g.NPDUFlag = (data[0] & 0x01) == 1 + g.ExtensionHeaderFlag = ((data[0] >> 2) & 0x01) == 1 + g.MessageType = data[1] + g.MessageLength = binary.BigEndian.Uint16(data[2:4]) + pLen := 8 + g.MessageLength + if uint16(dLen) < pLen { + return fmt.Errorf("GTP packet too small: %d bytes", dLen) + } + // Field used to multiplex different connections in the same GTP tunnel. + g.TEID = binary.BigEndian.Uint32(data[4:8]) + cIndex := uint16(hLen) + if g.SequenceNumberFlag || g.NPDUFlag || g.ExtensionHeaderFlag { + hLen += 4 + cIndex += 4 + if dLen < hLen { + return fmt.Errorf("GTP packet too small: %d bytes", dLen) + } + if g.SequenceNumberFlag { + g.SequenceNumber = binary.BigEndian.Uint16(data[8:10]) + } + if g.NPDUFlag { + g.NPDU = data[10] + } + if g.ExtensionHeaderFlag { + extensionFlag := true + for extensionFlag { + extensionType := uint8(data[cIndex-1]) + extensionLength := uint(data[cIndex]) + if extensionLength == 0 { + return fmt.Errorf("GTP packet with invalid extension header") + } + // extensionLength is in 4-octet units + lIndex := cIndex + (uint16(extensionLength) * 4) + if uint16(dLen) < lIndex { + fmt.Println(dLen, lIndex) + return fmt.Errorf("GTP packet with small extension header: %d bytes", dLen) + } + content := data[cIndex+1 : lIndex-1] + eh := GTPExtensionHeader{Type: extensionType, Content: content} + g.GTPExtensionHeaders = append(g.GTPExtensionHeaders, eh) + cIndex = lIndex + // Check if coming bytes are from an extension header + extensionFlag = data[cIndex-1] != 0 + + } + } + } + g.BaseLayer = BaseLayer{Contents: data[:cIndex], Payload: data[cIndex:]} + return nil + +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. 
+func (g *GTPv1U) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + data, err := b.PrependBytes(gtpMinimumSizeInBytes) + if err != nil { + return err + } + data[0] |= (g.Version << 5) + data[0] |= (1 << 4) + if len(g.GTPExtensionHeaders) > 0 { + data[0] |= 0x04 + g.ExtensionHeaderFlag = true + } + if g.SequenceNumberFlag { + data[0] |= 0x02 + } + if g.NPDUFlag { + data[0] |= 0x01 + } + data[1] = g.MessageType + binary.BigEndian.PutUint16(data[2:4], g.MessageLength) + binary.BigEndian.PutUint32(data[4:8], g.TEID) + if g.ExtensionHeaderFlag || g.SequenceNumberFlag || g.NPDUFlag { + data, err := b.AppendBytes(4) + if err != nil { + return err + } + binary.BigEndian.PutUint16(data[:2], g.SequenceNumber) + data[2] = g.NPDU + for _, eh := range g.GTPExtensionHeaders { + data[len(data)-1] = eh.Type + lContent := len(eh.Content) + // extensionLength is in 4-octet units + extensionLength := (lContent + 2) / 4 + // Get two extra byte for the next extension header type and length + data, err = b.AppendBytes(lContent + 2) + if err != nil { + return err + } + data[0] = byte(extensionLength) + copy(data[1:lContent+1], eh.Content) + } + } + return nil + +} + +// CanDecode returns a set of layers that GTP objects can decode. +func (g *GTPv1U) CanDecode() gopacket.LayerClass { + return LayerTypeGTPv1U +} + +// NextLayerType specifies the next layer that GoPacket should attempt to +func (g *GTPv1U) NextLayerType() gopacket.LayerType { + if len(g.LayerPayload()) == 0 { + return gopacket.LayerTypeZero + } + version := uint8(g.LayerPayload()[0]) >> 4 + if version == 4 { + return LayerTypeIPv4 + } else if version == 6 { + return LayerTypeIPv6 + } else { + return LayerTypePPP + } +} + +func decodeGTPv1u(data []byte, p gopacket.PacketBuilder) error { + gtp := >Pv1U{} + err := gtp.DecodeFromBytes(data, p) + if err != nil { + return err + } + p.AddLayer(gtp) + return p.NextDecoder(gtp.NextLayerType()) +} diff --git a/vendor/github.com/google/gopacket/layers/iana_ports.go b/vendor/github.com/google/gopacket/layers/iana_ports.go new file mode 100644 index 0000000000..ddcf3ecdb7 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/iana_ports.go @@ -0,0 +1,11351 @@ +// Copyright 2012 Google, Inc. All rights reserved. + +package layers + +// Created by gen.go, don't edit manually +// Generated at 2017-10-23 09:57:28.214859163 -0600 MDT m=+1.011679290 +// Fetched from "http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xml" + +// TCPPortNames contains the port names for all TCP ports. +var TCPPortNames = tcpPortNames + +// UDPPortNames contains the port names for all UDP ports. +var UDPPortNames = udpPortNames + +// SCTPPortNames contains the port names for all SCTP ports. 
+var SCTPPortNames = sctpPortNames + +var tcpPortNames = map[TCPPort]string{ + 1: "tcpmux", + 2: "compressnet", + 3: "compressnet", + 5: "rje", + 7: "echo", + 9: "discard", + 11: "systat", + 13: "daytime", + 17: "qotd", + 18: "msp", + 19: "chargen", + 20: "ftp-data", + 21: "ftp", + 22: "ssh", + 23: "telnet", + 25: "smtp", + 27: "nsw-fe", + 29: "msg-icp", + 31: "msg-auth", + 33: "dsp", + 37: "time", + 38: "rap", + 39: "rlp", + 41: "graphics", + 42: "name", + 43: "nicname", + 44: "mpm-flags", + 45: "mpm", + 46: "mpm-snd", + 48: "auditd", + 49: "tacacs", + 50: "re-mail-ck", + 52: "xns-time", + 53: "domain", + 54: "xns-ch", + 55: "isi-gl", + 56: "xns-auth", + 58: "xns-mail", + 62: "acas", + 63: "whoispp", + 64: "covia", + 65: "tacacs-ds", + 66: "sql-net", + 67: "bootps", + 68: "bootpc", + 69: "tftp", + 70: "gopher", + 71: "netrjs-1", + 72: "netrjs-2", + 73: "netrjs-3", + 74: "netrjs-4", + 76: "deos", + 78: "vettcp", + 79: "finger", + 80: "http", + 82: "xfer", + 83: "mit-ml-dev", + 84: "ctf", + 85: "mit-ml-dev", + 86: "mfcobol", + 88: "kerberos", + 89: "su-mit-tg", + 90: "dnsix", + 91: "mit-dov", + 92: "npp", + 93: "dcp", + 94: "objcall", + 95: "supdup", + 96: "dixie", + 97: "swift-rvf", + 98: "tacnews", + 99: "metagram", + 101: "hostname", + 102: "iso-tsap", + 103: "gppitnp", + 104: "acr-nema", + 105: "cso", + 106: "3com-tsmux", + 107: "rtelnet", + 108: "snagas", + 109: "pop2", + 110: "pop3", + 111: "sunrpc", + 112: "mcidas", + 113: "ident", + 115: "sftp", + 116: "ansanotify", + 117: "uucp-path", + 118: "sqlserv", + 119: "nntp", + 120: "cfdptkt", + 121: "erpc", + 122: "smakynet", + 123: "ntp", + 124: "ansatrader", + 125: "locus-map", + 126: "nxedit", + 127: "locus-con", + 128: "gss-xlicen", + 129: "pwdgen", + 130: "cisco-fna", + 131: "cisco-tna", + 132: "cisco-sys", + 133: "statsrv", + 134: "ingres-net", + 135: "epmap", + 136: "profile", + 137: "netbios-ns", + 138: "netbios-dgm", + 139: "netbios-ssn", + 140: "emfis-data", + 141: "emfis-cntl", + 142: "bl-idm", + 143: "imap", + 144: "uma", + 145: "uaac", + 146: "iso-tp0", + 147: "iso-ip", + 148: "jargon", + 149: "aed-512", + 150: "sql-net", + 151: "hems", + 152: "bftp", + 153: "sgmp", + 154: "netsc-prod", + 155: "netsc-dev", + 156: "sqlsrv", + 157: "knet-cmp", + 158: "pcmail-srv", + 159: "nss-routing", + 160: "sgmp-traps", + 161: "snmp", + 162: "snmptrap", + 163: "cmip-man", + 164: "cmip-agent", + 165: "xns-courier", + 166: "s-net", + 167: "namp", + 168: "rsvd", + 169: "send", + 170: "print-srv", + 171: "multiplex", + 172: "cl-1", + 173: "xyplex-mux", + 174: "mailq", + 175: "vmnet", + 176: "genrad-mux", + 177: "xdmcp", + 178: "nextstep", + 179: "bgp", + 180: "ris", + 181: "unify", + 182: "audit", + 183: "ocbinder", + 184: "ocserver", + 185: "remote-kis", + 186: "kis", + 187: "aci", + 188: "mumps", + 189: "qft", + 190: "gacp", + 191: "prospero", + 192: "osu-nms", + 193: "srmp", + 194: "irc", + 195: "dn6-nlm-aud", + 196: "dn6-smm-red", + 197: "dls", + 198: "dls-mon", + 199: "smux", + 200: "src", + 201: "at-rtmp", + 202: "at-nbp", + 203: "at-3", + 204: "at-echo", + 205: "at-5", + 206: "at-zis", + 207: "at-7", + 208: "at-8", + 209: "qmtp", + 210: "z39-50", + 211: "914c-g", + 212: "anet", + 213: "ipx", + 214: "vmpwscs", + 215: "softpc", + 216: "CAIlic", + 217: "dbase", + 218: "mpp", + 219: "uarps", + 220: "imap3", + 221: "fln-spx", + 222: "rsh-spx", + 223: "cdc", + 224: "masqdialer", + 242: "direct", + 243: "sur-meas", + 244: "inbusiness", + 245: "link", + 246: "dsp3270", + 247: "subntbcst-tftp", + 248: "bhfhs", + 256: "rap", + 257: "set", + 259: 
"esro-gen", + 260: "openport", + 261: "nsiiops", + 262: "arcisdms", + 263: "hdap", + 264: "bgmp", + 265: "x-bone-ctl", + 266: "sst", + 267: "td-service", + 268: "td-replica", + 269: "manet", + 271: "pt-tls", + 280: "http-mgmt", + 281: "personal-link", + 282: "cableport-ax", + 283: "rescap", + 284: "corerjd", + 286: "fxp", + 287: "k-block", + 308: "novastorbakcup", + 309: "entrusttime", + 310: "bhmds", + 311: "asip-webadmin", + 312: "vslmp", + 313: "magenta-logic", + 314: "opalis-robot", + 315: "dpsi", + 316: "decauth", + 317: "zannet", + 318: "pkix-timestamp", + 319: "ptp-event", + 320: "ptp-general", + 321: "pip", + 322: "rtsps", + 323: "rpki-rtr", + 324: "rpki-rtr-tls", + 333: "texar", + 344: "pdap", + 345: "pawserv", + 346: "zserv", + 347: "fatserv", + 348: "csi-sgwp", + 349: "mftp", + 350: "matip-type-a", + 351: "matip-type-b", + 352: "dtag-ste-sb", + 353: "ndsauth", + 354: "bh611", + 355: "datex-asn", + 356: "cloanto-net-1", + 357: "bhevent", + 358: "shrinkwrap", + 359: "nsrmp", + 360: "scoi2odialog", + 361: "semantix", + 362: "srssend", + 363: "rsvp-tunnel", + 364: "aurora-cmgr", + 365: "dtk", + 366: "odmr", + 367: "mortgageware", + 368: "qbikgdp", + 369: "rpc2portmap", + 370: "codaauth2", + 371: "clearcase", + 372: "ulistproc", + 373: "legent-1", + 374: "legent-2", + 375: "hassle", + 376: "nip", + 377: "tnETOS", + 378: "dsETOS", + 379: "is99c", + 380: "is99s", + 381: "hp-collector", + 382: "hp-managed-node", + 383: "hp-alarm-mgr", + 384: "arns", + 385: "ibm-app", + 386: "asa", + 387: "aurp", + 388: "unidata-ldm", + 389: "ldap", + 390: "uis", + 391: "synotics-relay", + 392: "synotics-broker", + 393: "meta5", + 394: "embl-ndt", + 395: "netcp", + 396: "netware-ip", + 397: "mptn", + 398: "kryptolan", + 399: "iso-tsap-c2", + 400: "osb-sd", + 401: "ups", + 402: "genie", + 403: "decap", + 404: "nced", + 405: "ncld", + 406: "imsp", + 407: "timbuktu", + 408: "prm-sm", + 409: "prm-nm", + 410: "decladebug", + 411: "rmt", + 412: "synoptics-trap", + 413: "smsp", + 414: "infoseek", + 415: "bnet", + 416: "silverplatter", + 417: "onmux", + 418: "hyper-g", + 419: "ariel1", + 420: "smpte", + 421: "ariel2", + 422: "ariel3", + 423: "opc-job-start", + 424: "opc-job-track", + 425: "icad-el", + 426: "smartsdp", + 427: "svrloc", + 428: "ocs-cmu", + 429: "ocs-amu", + 430: "utmpsd", + 431: "utmpcd", + 432: "iasd", + 433: "nnsp", + 434: "mobileip-agent", + 435: "mobilip-mn", + 436: "dna-cml", + 437: "comscm", + 438: "dsfgw", + 439: "dasp", + 440: "sgcp", + 441: "decvms-sysmgt", + 442: "cvc-hostd", + 443: "https", + 444: "snpp", + 445: "microsoft-ds", + 446: "ddm-rdb", + 447: "ddm-dfm", + 448: "ddm-ssl", + 449: "as-servermap", + 450: "tserver", + 451: "sfs-smp-net", + 452: "sfs-config", + 453: "creativeserver", + 454: "contentserver", + 455: "creativepartnr", + 456: "macon-tcp", + 457: "scohelp", + 458: "appleqtc", + 459: "ampr-rcmd", + 460: "skronk", + 461: "datasurfsrv", + 462: "datasurfsrvsec", + 463: "alpes", + 464: "kpasswd", + 465: "urd", + 466: "digital-vrc", + 467: "mylex-mapd", + 468: "photuris", + 469: "rcp", + 470: "scx-proxy", + 471: "mondex", + 472: "ljk-login", + 473: "hybrid-pop", + 474: "tn-tl-w1", + 475: "tcpnethaspsrv", + 476: "tn-tl-fd1", + 477: "ss7ns", + 478: "spsc", + 479: "iafserver", + 480: "iafdbase", + 481: "ph", + 482: "bgs-nsi", + 483: "ulpnet", + 484: "integra-sme", + 485: "powerburst", + 486: "avian", + 487: "saft", + 488: "gss-http", + 489: "nest-protocol", + 490: "micom-pfs", + 491: "go-login", + 492: "ticf-1", + 493: "ticf-2", + 494: "pov-ray", + 495: "intecourier", + 496: 
"pim-rp-disc", + 497: "retrospect", + 498: "siam", + 499: "iso-ill", + 500: "isakmp", + 501: "stmf", + 502: "mbap", + 503: "intrinsa", + 504: "citadel", + 505: "mailbox-lm", + 506: "ohimsrv", + 507: "crs", + 508: "xvttp", + 509: "snare", + 510: "fcp", + 511: "passgo", + 512: "exec", + 513: "login", + 514: "shell", + 515: "printer", + 516: "videotex", + 517: "talk", + 518: "ntalk", + 519: "utime", + 520: "efs", + 521: "ripng", + 522: "ulp", + 523: "ibm-db2", + 524: "ncp", + 525: "timed", + 526: "tempo", + 527: "stx", + 528: "custix", + 529: "irc-serv", + 530: "courier", + 531: "conference", + 532: "netnews", + 533: "netwall", + 534: "windream", + 535: "iiop", + 536: "opalis-rdv", + 537: "nmsp", + 538: "gdomap", + 539: "apertus-ldp", + 540: "uucp", + 541: "uucp-rlogin", + 542: "commerce", + 543: "klogin", + 544: "kshell", + 545: "appleqtcsrvr", + 546: "dhcpv6-client", + 547: "dhcpv6-server", + 548: "afpovertcp", + 549: "idfp", + 550: "new-rwho", + 551: "cybercash", + 552: "devshr-nts", + 553: "pirp", + 554: "rtsp", + 555: "dsf", + 556: "remotefs", + 557: "openvms-sysipc", + 558: "sdnskmp", + 559: "teedtap", + 560: "rmonitor", + 561: "monitor", + 562: "chshell", + 563: "nntps", + 564: "9pfs", + 565: "whoami", + 566: "streettalk", + 567: "banyan-rpc", + 568: "ms-shuttle", + 569: "ms-rome", + 570: "meter", + 571: "meter", + 572: "sonar", + 573: "banyan-vip", + 574: "ftp-agent", + 575: "vemmi", + 576: "ipcd", + 577: "vnas", + 578: "ipdd", + 579: "decbsrv", + 580: "sntp-heartbeat", + 581: "bdp", + 582: "scc-security", + 583: "philips-vc", + 584: "keyserver", + 586: "password-chg", + 587: "submission", + 588: "cal", + 589: "eyelink", + 590: "tns-cml", + 591: "http-alt", + 592: "eudora-set", + 593: "http-rpc-epmap", + 594: "tpip", + 595: "cab-protocol", + 596: "smsd", + 597: "ptcnameservice", + 598: "sco-websrvrmg3", + 599: "acp", + 600: "ipcserver", + 601: "syslog-conn", + 602: "xmlrpc-beep", + 603: "idxp", + 604: "tunnel", + 605: "soap-beep", + 606: "urm", + 607: "nqs", + 608: "sift-uft", + 609: "npmp-trap", + 610: "npmp-local", + 611: "npmp-gui", + 612: "hmmp-ind", + 613: "hmmp-op", + 614: "sshell", + 615: "sco-inetmgr", + 616: "sco-sysmgr", + 617: "sco-dtmgr", + 618: "dei-icda", + 619: "compaq-evm", + 620: "sco-websrvrmgr", + 621: "escp-ip", + 622: "collaborator", + 623: "oob-ws-http", + 624: "cryptoadmin", + 625: "dec-dlm", + 626: "asia", + 627: "passgo-tivoli", + 628: "qmqp", + 629: "3com-amp3", + 630: "rda", + 631: "ipp", + 632: "bmpp", + 633: "servstat", + 634: "ginad", + 635: "rlzdbase", + 636: "ldaps", + 637: "lanserver", + 638: "mcns-sec", + 639: "msdp", + 640: "entrust-sps", + 641: "repcmd", + 642: "esro-emsdp", + 643: "sanity", + 644: "dwr", + 645: "pssc", + 646: "ldp", + 647: "dhcp-failover", + 648: "rrp", + 649: "cadview-3d", + 650: "obex", + 651: "ieee-mms", + 652: "hello-port", + 653: "repscmd", + 654: "aodv", + 655: "tinc", + 656: "spmp", + 657: "rmc", + 658: "tenfold", + 660: "mac-srvr-admin", + 661: "hap", + 662: "pftp", + 663: "purenoise", + 664: "oob-ws-https", + 665: "sun-dr", + 666: "mdqs", + 667: "disclose", + 668: "mecomm", + 669: "meregister", + 670: "vacdsm-sws", + 671: "vacdsm-app", + 672: "vpps-qua", + 673: "cimplex", + 674: "acap", + 675: "dctp", + 676: "vpps-via", + 677: "vpp", + 678: "ggf-ncp", + 679: "mrm", + 680: "entrust-aaas", + 681: "entrust-aams", + 682: "xfr", + 683: "corba-iiop", + 684: "corba-iiop-ssl", + 685: "mdc-portmapper", + 686: "hcp-wismar", + 687: "asipregistry", + 688: "realm-rusd", + 689: "nmap", + 690: "vatp", + 691: "msexch-routing", + 692: 
"hyperwave-isp", + 693: "connendp", + 694: "ha-cluster", + 695: "ieee-mms-ssl", + 696: "rushd", + 697: "uuidgen", + 698: "olsr", + 699: "accessnetwork", + 700: "epp", + 701: "lmp", + 702: "iris-beep", + 704: "elcsd", + 705: "agentx", + 706: "silc", + 707: "borland-dsj", + 709: "entrust-kmsh", + 710: "entrust-ash", + 711: "cisco-tdp", + 712: "tbrpf", + 713: "iris-xpc", + 714: "iris-xpcs", + 715: "iris-lwz", + 729: "netviewdm1", + 730: "netviewdm2", + 731: "netviewdm3", + 741: "netgw", + 742: "netrcs", + 744: "flexlm", + 747: "fujitsu-dev", + 748: "ris-cm", + 749: "kerberos-adm", + 750: "rfile", + 751: "pump", + 752: "qrh", + 753: "rrh", + 754: "tell", + 758: "nlogin", + 759: "con", + 760: "ns", + 761: "rxe", + 762: "quotad", + 763: "cycleserv", + 764: "omserv", + 765: "webster", + 767: "phonebook", + 769: "vid", + 770: "cadlock", + 771: "rtip", + 772: "cycleserv2", + 773: "submit", + 774: "rpasswd", + 775: "entomb", + 776: "wpages", + 777: "multiling-http", + 780: "wpgs", + 800: "mdbs-daemon", + 801: "device", + 802: "mbap-s", + 810: "fcp-udp", + 828: "itm-mcell-s", + 829: "pkix-3-ca-ra", + 830: "netconf-ssh", + 831: "netconf-beep", + 832: "netconfsoaphttp", + 833: "netconfsoapbeep", + 847: "dhcp-failover2", + 848: "gdoi", + 853: "domain-s", + 854: "dlep", + 860: "iscsi", + 861: "owamp-control", + 862: "twamp-control", + 873: "rsync", + 886: "iclcnet-locate", + 887: "iclcnet-svinfo", + 888: "accessbuilder", + 900: "omginitialrefs", + 901: "smpnameres", + 902: "ideafarm-door", + 903: "ideafarm-panic", + 910: "kink", + 911: "xact-backup", + 912: "apex-mesh", + 913: "apex-edge", + 953: "rndc", + 989: "ftps-data", + 990: "ftps", + 991: "nas", + 992: "telnets", + 993: "imaps", + 995: "pop3s", + 996: "vsinet", + 997: "maitrd", + 998: "busboy", + 999: "garcon", + 1000: "cadlock2", + 1001: "webpush", + 1010: "surf", + 1021: "exp1", + 1022: "exp2", + 1025: "blackjack", + 1026: "cap", + 1029: "solid-mux", + 1033: "netinfo-local", + 1034: "activesync", + 1035: "mxxrlogin", + 1036: "nsstp", + 1037: "ams", + 1038: "mtqp", + 1039: "sbl", + 1040: "netarx", + 1041: "danf-ak2", + 1042: "afrog", + 1043: "boinc-client", + 1044: "dcutility", + 1045: "fpitp", + 1046: "wfremotertm", + 1047: "neod1", + 1048: "neod2", + 1049: "td-postman", + 1050: "cma", + 1051: "optima-vnet", + 1052: "ddt", + 1053: "remote-as", + 1054: "brvread", + 1055: "ansyslmd", + 1056: "vfo", + 1057: "startron", + 1058: "nim", + 1059: "nimreg", + 1060: "polestar", + 1061: "kiosk", + 1062: "veracity", + 1063: "kyoceranetdev", + 1064: "jstel", + 1065: "syscomlan", + 1066: "fpo-fns", + 1067: "instl-boots", + 1068: "instl-bootc", + 1069: "cognex-insight", + 1070: "gmrupdateserv", + 1071: "bsquare-voip", + 1072: "cardax", + 1073: "bridgecontrol", + 1074: "warmspotMgmt", + 1075: "rdrmshc", + 1076: "dab-sti-c", + 1077: "imgames", + 1078: "avocent-proxy", + 1079: "asprovatalk", + 1080: "socks", + 1081: "pvuniwien", + 1082: "amt-esd-prot", + 1083: "ansoft-lm-1", + 1084: "ansoft-lm-2", + 1085: "webobjects", + 1086: "cplscrambler-lg", + 1087: "cplscrambler-in", + 1088: "cplscrambler-al", + 1089: "ff-annunc", + 1090: "ff-fms", + 1091: "ff-sm", + 1092: "obrpd", + 1093: "proofd", + 1094: "rootd", + 1095: "nicelink", + 1096: "cnrprotocol", + 1097: "sunclustermgr", + 1098: "rmiactivation", + 1099: "rmiregistry", + 1100: "mctp", + 1101: "pt2-discover", + 1102: "adobeserver-1", + 1103: "adobeserver-2", + 1104: "xrl", + 1105: "ftranhc", + 1106: "isoipsigport-1", + 1107: "isoipsigport-2", + 1108: "ratio-adp", + 1110: "webadmstart", + 1111: "lmsocialserver", + 
1112: "icp", + 1113: "ltp-deepspace", + 1114: "mini-sql", + 1115: "ardus-trns", + 1116: "ardus-cntl", + 1117: "ardus-mtrns", + 1118: "sacred", + 1119: "bnetgame", + 1120: "bnetfile", + 1121: "rmpp", + 1122: "availant-mgr", + 1123: "murray", + 1124: "hpvmmcontrol", + 1125: "hpvmmagent", + 1126: "hpvmmdata", + 1127: "kwdb-commn", + 1128: "saphostctrl", + 1129: "saphostctrls", + 1130: "casp", + 1131: "caspssl", + 1132: "kvm-via-ip", + 1133: "dfn", + 1134: "aplx", + 1135: "omnivision", + 1136: "hhb-gateway", + 1137: "trim", + 1138: "encrypted-admin", + 1139: "evm", + 1140: "autonoc", + 1141: "mxomss", + 1142: "edtools", + 1143: "imyx", + 1144: "fuscript", + 1145: "x9-icue", + 1146: "audit-transfer", + 1147: "capioverlan", + 1148: "elfiq-repl", + 1149: "bvtsonar", + 1150: "blaze", + 1151: "unizensus", + 1152: "winpoplanmess", + 1153: "c1222-acse", + 1154: "resacommunity", + 1155: "nfa", + 1156: "iascontrol-oms", + 1157: "iascontrol", + 1158: "dbcontrol-oms", + 1159: "oracle-oms", + 1160: "olsv", + 1161: "health-polling", + 1162: "health-trap", + 1163: "sddp", + 1164: "qsm-proxy", + 1165: "qsm-gui", + 1166: "qsm-remote", + 1167: "cisco-ipsla", + 1168: "vchat", + 1169: "tripwire", + 1170: "atc-lm", + 1171: "atc-appserver", + 1172: "dnap", + 1173: "d-cinema-rrp", + 1174: "fnet-remote-ui", + 1175: "dossier", + 1176: "indigo-server", + 1177: "dkmessenger", + 1178: "sgi-storman", + 1179: "b2n", + 1180: "mc-client", + 1181: "3comnetman", + 1182: "accelenet", + 1183: "llsurfup-http", + 1184: "llsurfup-https", + 1185: "catchpole", + 1186: "mysql-cluster", + 1187: "alias", + 1188: "hp-webadmin", + 1189: "unet", + 1190: "commlinx-avl", + 1191: "gpfs", + 1192: "caids-sensor", + 1193: "fiveacross", + 1194: "openvpn", + 1195: "rsf-1", + 1196: "netmagic", + 1197: "carrius-rshell", + 1198: "cajo-discovery", + 1199: "dmidi", + 1200: "scol", + 1201: "nucleus-sand", + 1202: "caiccipc", + 1203: "ssslic-mgr", + 1204: "ssslog-mgr", + 1205: "accord-mgc", + 1206: "anthony-data", + 1207: "metasage", + 1208: "seagull-ais", + 1209: "ipcd3", + 1210: "eoss", + 1211: "groove-dpp", + 1212: "lupa", + 1213: "mpc-lifenet", + 1214: "kazaa", + 1215: "scanstat-1", + 1216: "etebac5", + 1217: "hpss-ndapi", + 1218: "aeroflight-ads", + 1219: "aeroflight-ret", + 1220: "qt-serveradmin", + 1221: "sweetware-apps", + 1222: "nerv", + 1223: "tgp", + 1224: "vpnz", + 1225: "slinkysearch", + 1226: "stgxfws", + 1227: "dns2go", + 1228: "florence", + 1229: "zented", + 1230: "periscope", + 1231: "menandmice-lpm", + 1232: "first-defense", + 1233: "univ-appserver", + 1234: "search-agent", + 1235: "mosaicsyssvc1", + 1236: "bvcontrol", + 1237: "tsdos390", + 1238: "hacl-qs", + 1239: "nmsd", + 1240: "instantia", + 1241: "nessus", + 1242: "nmasoverip", + 1243: "serialgateway", + 1244: "isbconference1", + 1245: "isbconference2", + 1246: "payrouter", + 1247: "visionpyramid", + 1248: "hermes", + 1249: "mesavistaco", + 1250: "swldy-sias", + 1251: "servergraph", + 1252: "bspne-pcc", + 1253: "q55-pcc", + 1254: "de-noc", + 1255: "de-cache-query", + 1256: "de-server", + 1257: "shockwave2", + 1258: "opennl", + 1259: "opennl-voice", + 1260: "ibm-ssd", + 1261: "mpshrsv", + 1262: "qnts-orb", + 1263: "dka", + 1264: "prat", + 1265: "dssiapi", + 1266: "dellpwrappks", + 1267: "epc", + 1268: "propel-msgsys", + 1269: "watilapp", + 1270: "opsmgr", + 1271: "excw", + 1272: "cspmlockmgr", + 1273: "emc-gateway", + 1274: "t1distproc", + 1275: "ivcollector", + 1277: "miva-mqs", + 1278: "dellwebadmin-1", + 1279: "dellwebadmin-2", + 1280: "pictrography", + 1281: "healthd", + 1282: 
"emperion", + 1283: "productinfo", + 1284: "iee-qfx", + 1285: "neoiface", + 1286: "netuitive", + 1287: "routematch", + 1288: "navbuddy", + 1289: "jwalkserver", + 1290: "winjaserver", + 1291: "seagulllms", + 1292: "dsdn", + 1293: "pkt-krb-ipsec", + 1294: "cmmdriver", + 1295: "ehtp", + 1296: "dproxy", + 1297: "sdproxy", + 1298: "lpcp", + 1299: "hp-sci", + 1300: "h323hostcallsc", + 1301: "ci3-software-1", + 1302: "ci3-software-2", + 1303: "sftsrv", + 1304: "boomerang", + 1305: "pe-mike", + 1306: "re-conn-proto", + 1307: "pacmand", + 1308: "odsi", + 1309: "jtag-server", + 1310: "husky", + 1311: "rxmon", + 1312: "sti-envision", + 1313: "bmc-patroldb", + 1314: "pdps", + 1315: "els", + 1316: "exbit-escp", + 1317: "vrts-ipcserver", + 1318: "krb5gatekeeper", + 1319: "amx-icsp", + 1320: "amx-axbnet", + 1321: "pip", + 1322: "novation", + 1323: "brcd", + 1324: "delta-mcp", + 1325: "dx-instrument", + 1326: "wimsic", + 1327: "ultrex", + 1328: "ewall", + 1329: "netdb-export", + 1330: "streetperfect", + 1331: "intersan", + 1332: "pcia-rxp-b", + 1333: "passwrd-policy", + 1334: "writesrv", + 1335: "digital-notary", + 1336: "ischat", + 1337: "menandmice-dns", + 1338: "wmc-log-svc", + 1339: "kjtsiteserver", + 1340: "naap", + 1341: "qubes", + 1342: "esbroker", + 1343: "re101", + 1344: "icap", + 1345: "vpjp", + 1346: "alta-ana-lm", + 1347: "bbn-mmc", + 1348: "bbn-mmx", + 1349: "sbook", + 1350: "editbench", + 1351: "equationbuilder", + 1352: "lotusnote", + 1353: "relief", + 1354: "XSIP-network", + 1355: "intuitive-edge", + 1356: "cuillamartin", + 1357: "pegboard", + 1358: "connlcli", + 1359: "ftsrv", + 1360: "mimer", + 1361: "linx", + 1362: "timeflies", + 1363: "ndm-requester", + 1364: "ndm-server", + 1365: "adapt-sna", + 1366: "netware-csp", + 1367: "dcs", + 1368: "screencast", + 1369: "gv-us", + 1370: "us-gv", + 1371: "fc-cli", + 1372: "fc-ser", + 1373: "chromagrafx", + 1374: "molly", + 1375: "bytex", + 1376: "ibm-pps", + 1377: "cichlid", + 1378: "elan", + 1379: "dbreporter", + 1380: "telesis-licman", + 1381: "apple-licman", + 1382: "udt-os", + 1383: "gwha", + 1384: "os-licman", + 1385: "atex-elmd", + 1386: "checksum", + 1387: "cadsi-lm", + 1388: "objective-dbc", + 1389: "iclpv-dm", + 1390: "iclpv-sc", + 1391: "iclpv-sas", + 1392: "iclpv-pm", + 1393: "iclpv-nls", + 1394: "iclpv-nlc", + 1395: "iclpv-wsm", + 1396: "dvl-activemail", + 1397: "audio-activmail", + 1398: "video-activmail", + 1399: "cadkey-licman", + 1400: "cadkey-tablet", + 1401: "goldleaf-licman", + 1402: "prm-sm-np", + 1403: "prm-nm-np", + 1404: "igi-lm", + 1405: "ibm-res", + 1406: "netlabs-lm", + 1407: "tibet-server", + 1408: "sophia-lm", + 1409: "here-lm", + 1410: "hiq", + 1411: "af", + 1412: "innosys", + 1413: "innosys-acl", + 1414: "ibm-mqseries", + 1415: "dbstar", + 1416: "novell-lu6-2", + 1417: "timbuktu-srv1", + 1418: "timbuktu-srv2", + 1419: "timbuktu-srv3", + 1420: "timbuktu-srv4", + 1421: "gandalf-lm", + 1422: "autodesk-lm", + 1423: "essbase", + 1424: "hybrid", + 1425: "zion-lm", + 1426: "sais", + 1427: "mloadd", + 1428: "informatik-lm", + 1429: "nms", + 1430: "tpdu", + 1431: "rgtp", + 1432: "blueberry-lm", + 1433: "ms-sql-s", + 1434: "ms-sql-m", + 1435: "ibm-cics", + 1436: "saism", + 1437: "tabula", + 1438: "eicon-server", + 1439: "eicon-x25", + 1440: "eicon-slp", + 1441: "cadis-1", + 1442: "cadis-2", + 1443: "ies-lm", + 1444: "marcam-lm", + 1445: "proxima-lm", + 1446: "ora-lm", + 1447: "apri-lm", + 1448: "oc-lm", + 1449: "peport", + 1450: "dwf", + 1451: "infoman", + 1452: "gtegsc-lm", + 1453: "genie-lm", + 1454: "interhdl-elmd", + 1455: 
"esl-lm", + 1456: "dca", + 1457: "valisys-lm", + 1458: "nrcabq-lm", + 1459: "proshare1", + 1460: "proshare2", + 1461: "ibm-wrless-lan", + 1462: "world-lm", + 1463: "nucleus", + 1464: "msl-lmd", + 1465: "pipes", + 1466: "oceansoft-lm", + 1467: "csdmbase", + 1468: "csdm", + 1469: "aal-lm", + 1470: "uaiact", + 1471: "csdmbase", + 1472: "csdm", + 1473: "openmath", + 1474: "telefinder", + 1475: "taligent-lm", + 1476: "clvm-cfg", + 1477: "ms-sna-server", + 1478: "ms-sna-base", + 1479: "dberegister", + 1480: "pacerforum", + 1481: "airs", + 1482: "miteksys-lm", + 1483: "afs", + 1484: "confluent", + 1485: "lansource", + 1486: "nms-topo-serv", + 1487: "localinfosrvr", + 1488: "docstor", + 1489: "dmdocbroker", + 1490: "insitu-conf", + 1492: "stone-design-1", + 1493: "netmap-lm", + 1494: "ica", + 1495: "cvc", + 1496: "liberty-lm", + 1497: "rfx-lm", + 1498: "sybase-sqlany", + 1499: "fhc", + 1500: "vlsi-lm", + 1501: "saiscm", + 1502: "shivadiscovery", + 1503: "imtc-mcs", + 1504: "evb-elm", + 1505: "funkproxy", + 1506: "utcd", + 1507: "symplex", + 1508: "diagmond", + 1509: "robcad-lm", + 1510: "mvx-lm", + 1511: "3l-l1", + 1512: "wins", + 1513: "fujitsu-dtc", + 1514: "fujitsu-dtcns", + 1515: "ifor-protocol", + 1516: "vpad", + 1517: "vpac", + 1518: "vpvd", + 1519: "vpvc", + 1520: "atm-zip-office", + 1521: "ncube-lm", + 1522: "ricardo-lm", + 1523: "cichild-lm", + 1524: "ingreslock", + 1525: "orasrv", + 1526: "pdap-np", + 1527: "tlisrv", + 1529: "coauthor", + 1530: "rap-service", + 1531: "rap-listen", + 1532: "miroconnect", + 1533: "virtual-places", + 1534: "micromuse-lm", + 1535: "ampr-info", + 1536: "ampr-inter", + 1537: "sdsc-lm", + 1538: "3ds-lm", + 1539: "intellistor-lm", + 1540: "rds", + 1541: "rds2", + 1542: "gridgen-elmd", + 1543: "simba-cs", + 1544: "aspeclmd", + 1545: "vistium-share", + 1546: "abbaccuray", + 1547: "laplink", + 1548: "axon-lm", + 1549: "shivahose", + 1550: "3m-image-lm", + 1551: "hecmtl-db", + 1552: "pciarray", + 1553: "sna-cs", + 1554: "caci-lm", + 1555: "livelan", + 1556: "veritas-pbx", + 1557: "arbortext-lm", + 1558: "xingmpeg", + 1559: "web2host", + 1560: "asci-val", + 1561: "facilityview", + 1562: "pconnectmgr", + 1563: "cadabra-lm", + 1564: "pay-per-view", + 1565: "winddlb", + 1566: "corelvideo", + 1567: "jlicelmd", + 1568: "tsspmap", + 1569: "ets", + 1570: "orbixd", + 1571: "rdb-dbs-disp", + 1572: "chip-lm", + 1573: "itscomm-ns", + 1574: "mvel-lm", + 1575: "oraclenames", + 1576: "moldflow-lm", + 1577: "hypercube-lm", + 1578: "jacobus-lm", + 1579: "ioc-sea-lm", + 1580: "tn-tl-r1", + 1581: "mil-2045-47001", + 1582: "msims", + 1583: "simbaexpress", + 1584: "tn-tl-fd2", + 1585: "intv", + 1586: "ibm-abtact", + 1587: "pra-elmd", + 1588: "triquest-lm", + 1589: "vqp", + 1590: "gemini-lm", + 1591: "ncpm-pm", + 1592: "commonspace", + 1593: "mainsoft-lm", + 1594: "sixtrak", + 1595: "radio", + 1596: "radio-sm", + 1597: "orbplus-iiop", + 1598: "picknfs", + 1599: "simbaservices", + 1600: "issd", + 1601: "aas", + 1602: "inspect", + 1603: "picodbc", + 1604: "icabrowser", + 1605: "slp", + 1606: "slm-api", + 1607: "stt", + 1608: "smart-lm", + 1609: "isysg-lm", + 1610: "taurus-wh", + 1611: "ill", + 1612: "netbill-trans", + 1613: "netbill-keyrep", + 1614: "netbill-cred", + 1615: "netbill-auth", + 1616: "netbill-prod", + 1617: "nimrod-agent", + 1618: "skytelnet", + 1619: "xs-openstorage", + 1620: "faxportwinport", + 1621: "softdataphone", + 1622: "ontime", + 1623: "jaleosnd", + 1624: "udp-sr-port", + 1625: "svs-omagent", + 1626: "shockwave", + 1627: "t128-gateway", + 1628: "lontalk-norm", + 1629: 
"lontalk-urgnt", + 1630: "oraclenet8cman", + 1631: "visitview", + 1632: "pammratc", + 1633: "pammrpc", + 1634: "loaprobe", + 1635: "edb-server1", + 1636: "isdc", + 1637: "islc", + 1638: "ismc", + 1639: "cert-initiator", + 1640: "cert-responder", + 1641: "invision", + 1642: "isis-am", + 1643: "isis-ambc", + 1644: "saiseh", + 1645: "sightline", + 1646: "sa-msg-port", + 1647: "rsap", + 1648: "concurrent-lm", + 1649: "kermit", + 1650: "nkd", + 1651: "shiva-confsrvr", + 1652: "xnmp", + 1653: "alphatech-lm", + 1654: "stargatealerts", + 1655: "dec-mbadmin", + 1656: "dec-mbadmin-h", + 1657: "fujitsu-mmpdc", + 1658: "sixnetudr", + 1659: "sg-lm", + 1660: "skip-mc-gikreq", + 1661: "netview-aix-1", + 1662: "netview-aix-2", + 1663: "netview-aix-3", + 1664: "netview-aix-4", + 1665: "netview-aix-5", + 1666: "netview-aix-6", + 1667: "netview-aix-7", + 1668: "netview-aix-8", + 1669: "netview-aix-9", + 1670: "netview-aix-10", + 1671: "netview-aix-11", + 1672: "netview-aix-12", + 1673: "proshare-mc-1", + 1674: "proshare-mc-2", + 1675: "pdp", + 1676: "netcomm1", + 1677: "groupwise", + 1678: "prolink", + 1679: "darcorp-lm", + 1680: "microcom-sbp", + 1681: "sd-elmd", + 1682: "lanyon-lantern", + 1683: "ncpm-hip", + 1684: "snaresecure", + 1685: "n2nremote", + 1686: "cvmon", + 1687: "nsjtp-ctrl", + 1688: "nsjtp-data", + 1689: "firefox", + 1690: "ng-umds", + 1691: "empire-empuma", + 1692: "sstsys-lm", + 1693: "rrirtr", + 1694: "rrimwm", + 1695: "rrilwm", + 1696: "rrifmm", + 1697: "rrisat", + 1698: "rsvp-encap-1", + 1699: "rsvp-encap-2", + 1700: "mps-raft", + 1701: "l2f", + 1702: "deskshare", + 1703: "hb-engine", + 1704: "bcs-broker", + 1705: "slingshot", + 1706: "jetform", + 1707: "vdmplay", + 1708: "gat-lmd", + 1709: "centra", + 1710: "impera", + 1711: "pptconference", + 1712: "registrar", + 1713: "conferencetalk", + 1714: "sesi-lm", + 1715: "houdini-lm", + 1716: "xmsg", + 1717: "fj-hdnet", + 1718: "h323gatedisc", + 1719: "h323gatestat", + 1720: "h323hostcall", + 1721: "caicci", + 1722: "hks-lm", + 1723: "pptp", + 1724: "csbphonemaster", + 1725: "iden-ralp", + 1726: "iberiagames", + 1727: "winddx", + 1728: "telindus", + 1729: "citynl", + 1730: "roketz", + 1731: "msiccp", + 1732: "proxim", + 1733: "siipat", + 1734: "cambertx-lm", + 1735: "privatechat", + 1736: "street-stream", + 1737: "ultimad", + 1738: "gamegen1", + 1739: "webaccess", + 1740: "encore", + 1741: "cisco-net-mgmt", + 1742: "3Com-nsd", + 1743: "cinegrfx-lm", + 1744: "ncpm-ft", + 1745: "remote-winsock", + 1746: "ftrapid-1", + 1747: "ftrapid-2", + 1748: "oracle-em1", + 1749: "aspen-services", + 1750: "sslp", + 1751: "swiftnet", + 1752: "lofr-lm", + 1753: "predatar-comms", + 1754: "oracle-em2", + 1755: "ms-streaming", + 1756: "capfast-lmd", + 1757: "cnhrp", + 1758: "tftp-mcast", + 1759: "spss-lm", + 1760: "www-ldap-gw", + 1761: "cft-0", + 1762: "cft-1", + 1763: "cft-2", + 1764: "cft-3", + 1765: "cft-4", + 1766: "cft-5", + 1767: "cft-6", + 1768: "cft-7", + 1769: "bmc-net-adm", + 1770: "bmc-net-svc", + 1771: "vaultbase", + 1772: "essweb-gw", + 1773: "kmscontrol", + 1774: "global-dtserv", + 1775: "vdab", + 1776: "femis", + 1777: "powerguardian", + 1778: "prodigy-intrnet", + 1779: "pharmasoft", + 1780: "dpkeyserv", + 1781: "answersoft-lm", + 1782: "hp-hcip", + 1784: "finle-lm", + 1785: "windlm", + 1786: "funk-logger", + 1787: "funk-license", + 1788: "psmond", + 1789: "hello", + 1790: "nmsp", + 1791: "ea1", + 1792: "ibm-dt-2", + 1793: "rsc-robot", + 1794: "cera-bcm", + 1795: "dpi-proxy", + 1796: "vocaltec-admin", + 1797: "uma", + 1798: "etp", + 1799: 
"netrisk", + 1800: "ansys-lm", + 1801: "msmq", + 1802: "concomp1", + 1803: "hp-hcip-gwy", + 1804: "enl", + 1805: "enl-name", + 1806: "musiconline", + 1807: "fhsp", + 1808: "oracle-vp2", + 1809: "oracle-vp1", + 1810: "jerand-lm", + 1811: "scientia-sdb", + 1812: "radius", + 1813: "radius-acct", + 1814: "tdp-suite", + 1815: "mmpft", + 1816: "harp", + 1817: "rkb-oscs", + 1818: "etftp", + 1819: "plato-lm", + 1820: "mcagent", + 1821: "donnyworld", + 1822: "es-elmd", + 1823: "unisys-lm", + 1824: "metrics-pas", + 1825: "direcpc-video", + 1826: "ardt", + 1827: "asi", + 1828: "itm-mcell-u", + 1829: "optika-emedia", + 1830: "net8-cman", + 1831: "myrtle", + 1832: "tht-treasure", + 1833: "udpradio", + 1834: "ardusuni", + 1835: "ardusmul", + 1836: "ste-smsc", + 1837: "csoft1", + 1838: "talnet", + 1839: "netopia-vo1", + 1840: "netopia-vo2", + 1841: "netopia-vo3", + 1842: "netopia-vo4", + 1843: "netopia-vo5", + 1844: "direcpc-dll", + 1845: "altalink", + 1846: "tunstall-pnc", + 1847: "slp-notify", + 1848: "fjdocdist", + 1849: "alpha-sms", + 1850: "gsi", + 1851: "ctcd", + 1852: "virtual-time", + 1853: "vids-avtp", + 1854: "buddy-draw", + 1855: "fiorano-rtrsvc", + 1856: "fiorano-msgsvc", + 1857: "datacaptor", + 1858: "privateark", + 1859: "gammafetchsvr", + 1860: "sunscalar-svc", + 1861: "lecroy-vicp", + 1862: "mysql-cm-agent", + 1863: "msnp", + 1864: "paradym-31port", + 1865: "entp", + 1866: "swrmi", + 1867: "udrive", + 1868: "viziblebrowser", + 1869: "transact", + 1870: "sunscalar-dns", + 1871: "canocentral0", + 1872: "canocentral1", + 1873: "fjmpjps", + 1874: "fjswapsnp", + 1875: "westell-stats", + 1876: "ewcappsrv", + 1877: "hp-webqosdb", + 1878: "drmsmc", + 1879: "nettgain-nms", + 1880: "vsat-control", + 1881: "ibm-mqseries2", + 1882: "ecsqdmn", + 1883: "mqtt", + 1884: "idmaps", + 1885: "vrtstrapserver", + 1886: "leoip", + 1887: "filex-lport", + 1888: "ncconfig", + 1889: "unify-adapter", + 1890: "wilkenlistener", + 1891: "childkey-notif", + 1892: "childkey-ctrl", + 1893: "elad", + 1894: "o2server-port", + 1896: "b-novative-ls", + 1897: "metaagent", + 1898: "cymtec-port", + 1899: "mc2studios", + 1900: "ssdp", + 1901: "fjicl-tep-a", + 1902: "fjicl-tep-b", + 1903: "linkname", + 1904: "fjicl-tep-c", + 1905: "sugp", + 1906: "tpmd", + 1907: "intrastar", + 1908: "dawn", + 1909: "global-wlink", + 1910: "ultrabac", + 1911: "mtp", + 1912: "rhp-iibp", + 1913: "armadp", + 1914: "elm-momentum", + 1915: "facelink", + 1916: "persona", + 1917: "noagent", + 1918: "can-nds", + 1919: "can-dch", + 1920: "can-ferret", + 1921: "noadmin", + 1922: "tapestry", + 1923: "spice", + 1924: "xiip", + 1925: "discovery-port", + 1926: "egs", + 1927: "videte-cipc", + 1928: "emsd-port", + 1929: "bandwiz-system", + 1930: "driveappserver", + 1931: "amdsched", + 1932: "ctt-broker", + 1933: "xmapi", + 1934: "xaapi", + 1935: "macromedia-fcs", + 1936: "jetcmeserver", + 1937: "jwserver", + 1938: "jwclient", + 1939: "jvserver", + 1940: "jvclient", + 1941: "dic-aida", + 1942: "res", + 1943: "beeyond-media", + 1944: "close-combat", + 1945: "dialogic-elmd", + 1946: "tekpls", + 1947: "sentinelsrm", + 1948: "eye2eye", + 1949: "ismaeasdaqlive", + 1950: "ismaeasdaqtest", + 1951: "bcs-lmserver", + 1952: "mpnjsc", + 1953: "rapidbase", + 1954: "abr-api", + 1955: "abr-secure", + 1956: "vrtl-vmf-ds", + 1957: "unix-status", + 1958: "dxadmind", + 1959: "simp-all", + 1960: "nasmanager", + 1961: "bts-appserver", + 1962: "biap-mp", + 1963: "webmachine", + 1964: "solid-e-engine", + 1965: "tivoli-npm", + 1966: "slush", + 1967: "sns-quote", + 1968: "lipsinc", + 
1969: "lipsinc1", + 1970: "netop-rc", + 1971: "netop-school", + 1972: "intersys-cache", + 1973: "dlsrap", + 1974: "drp", + 1975: "tcoflashagent", + 1976: "tcoregagent", + 1977: "tcoaddressbook", + 1978: "unisql", + 1979: "unisql-java", + 1980: "pearldoc-xact", + 1981: "p2pq", + 1982: "estamp", + 1983: "lhtp", + 1984: "bb", + 1985: "hsrp", + 1986: "licensedaemon", + 1987: "tr-rsrb-p1", + 1988: "tr-rsrb-p2", + 1989: "tr-rsrb-p3", + 1990: "stun-p1", + 1991: "stun-p2", + 1992: "stun-p3", + 1993: "snmp-tcp-port", + 1994: "stun-port", + 1995: "perf-port", + 1996: "tr-rsrb-port", + 1997: "gdp-port", + 1998: "x25-svc-port", + 1999: "tcp-id-port", + 2000: "cisco-sccp", + 2001: "dc", + 2002: "globe", + 2003: "brutus", + 2004: "mailbox", + 2005: "berknet", + 2006: "invokator", + 2007: "dectalk", + 2008: "conf", + 2009: "news", + 2010: "search", + 2011: "raid-cc", + 2012: "ttyinfo", + 2013: "raid-am", + 2014: "troff", + 2015: "cypress", + 2016: "bootserver", + 2017: "cypress-stat", + 2018: "terminaldb", + 2019: "whosockami", + 2020: "xinupageserver", + 2021: "servexec", + 2022: "down", + 2023: "xinuexpansion3", + 2024: "xinuexpansion4", + 2025: "ellpack", + 2026: "scrabble", + 2027: "shadowserver", + 2028: "submitserver", + 2029: "hsrpv6", + 2030: "device2", + 2031: "mobrien-chat", + 2032: "blackboard", + 2033: "glogger", + 2034: "scoremgr", + 2035: "imsldoc", + 2036: "e-dpnet", + 2037: "applus", + 2038: "objectmanager", + 2039: "prizma", + 2040: "lam", + 2041: "interbase", + 2042: "isis", + 2043: "isis-bcast", + 2044: "rimsl", + 2045: "cdfunc", + 2046: "sdfunc", + 2047: "dls", + 2048: "dls-monitor", + 2049: "shilp", + 2050: "av-emb-config", + 2051: "epnsdp", + 2052: "clearvisn", + 2053: "lot105-ds-upd", + 2054: "weblogin", + 2055: "iop", + 2056: "omnisky", + 2057: "rich-cp", + 2058: "newwavesearch", + 2059: "bmc-messaging", + 2060: "teleniumdaemon", + 2061: "netmount", + 2062: "icg-swp", + 2063: "icg-bridge", + 2064: "icg-iprelay", + 2065: "dlsrpn", + 2066: "aura", + 2067: "dlswpn", + 2068: "avauthsrvprtcl", + 2069: "event-port", + 2070: "ah-esp-encap", + 2071: "acp-port", + 2072: "msync", + 2073: "gxs-data-port", + 2074: "vrtl-vmf-sa", + 2075: "newlixengine", + 2076: "newlixconfig", + 2077: "tsrmagt", + 2078: "tpcsrvr", + 2079: "idware-router", + 2080: "autodesk-nlm", + 2081: "kme-trap-port", + 2082: "infowave", + 2083: "radsec", + 2084: "sunclustergeo", + 2085: "ada-cip", + 2086: "gnunet", + 2087: "eli", + 2088: "ip-blf", + 2089: "sep", + 2090: "lrp", + 2091: "prp", + 2092: "descent3", + 2093: "nbx-cc", + 2094: "nbx-au", + 2095: "nbx-ser", + 2096: "nbx-dir", + 2097: "jetformpreview", + 2098: "dialog-port", + 2099: "h2250-annex-g", + 2100: "amiganetfs", + 2101: "rtcm-sc104", + 2102: "zephyr-srv", + 2103: "zephyr-clt", + 2104: "zephyr-hm", + 2105: "minipay", + 2106: "mzap", + 2107: "bintec-admin", + 2108: "comcam", + 2109: "ergolight", + 2110: "umsp", + 2111: "dsatp", + 2112: "idonix-metanet", + 2113: "hsl-storm", + 2114: "newheights", + 2115: "kdm", + 2116: "ccowcmr", + 2117: "mentaclient", + 2118: "mentaserver", + 2119: "gsigatekeeper", + 2120: "qencp", + 2121: "scientia-ssdb", + 2122: "caupc-remote", + 2123: "gtp-control", + 2124: "elatelink", + 2125: "lockstep", + 2126: "pktcable-cops", + 2127: "index-pc-wb", + 2128: "net-steward", + 2129: "cs-live", + 2130: "xds", + 2131: "avantageb2b", + 2132: "solera-epmap", + 2133: "zymed-zpp", + 2134: "avenue", + 2135: "gris", + 2136: "appworxsrv", + 2137: "connect", + 2138: "unbind-cluster", + 2139: "ias-auth", + 2140: "ias-reg", + 2141: "ias-admind", + 
2142: "tdmoip", + 2143: "lv-jc", + 2144: "lv-ffx", + 2145: "lv-pici", + 2146: "lv-not", + 2147: "lv-auth", + 2148: "veritas-ucl", + 2149: "acptsys", + 2150: "dynamic3d", + 2151: "docent", + 2152: "gtp-user", + 2153: "ctlptc", + 2154: "stdptc", + 2155: "brdptc", + 2156: "trp", + 2157: "xnds", + 2158: "touchnetplus", + 2159: "gdbremote", + 2160: "apc-2160", + 2161: "apc-2161", + 2162: "navisphere", + 2163: "navisphere-sec", + 2164: "ddns-v3", + 2165: "x-bone-api", + 2166: "iwserver", + 2167: "raw-serial", + 2168: "easy-soft-mux", + 2169: "brain", + 2170: "eyetv", + 2171: "msfw-storage", + 2172: "msfw-s-storage", + 2173: "msfw-replica", + 2174: "msfw-array", + 2175: "airsync", + 2176: "rapi", + 2177: "qwave", + 2178: "bitspeer", + 2179: "vmrdp", + 2180: "mc-gt-srv", + 2181: "eforward", + 2182: "cgn-stat", + 2183: "cgn-config", + 2184: "nvd", + 2185: "onbase-dds", + 2186: "gtaua", + 2187: "ssmc", + 2188: "radware-rpm", + 2189: "radware-rpm-s", + 2190: "tivoconnect", + 2191: "tvbus", + 2192: "asdis", + 2193: "drwcs", + 2197: "mnp-exchange", + 2198: "onehome-remote", + 2199: "onehome-help", + 2200: "ici", + 2201: "ats", + 2202: "imtc-map", + 2203: "b2-runtime", + 2204: "b2-license", + 2205: "jps", + 2206: "hpocbus", + 2207: "hpssd", + 2208: "hpiod", + 2209: "rimf-ps", + 2210: "noaaport", + 2211: "emwin", + 2212: "leecoposserver", + 2213: "kali", + 2214: "rpi", + 2215: "ipcore", + 2216: "vtu-comms", + 2217: "gotodevice", + 2218: "bounzza", + 2219: "netiq-ncap", + 2220: "netiq", + 2221: "ethernet-ip-s", + 2222: "EtherNet-IP-1", + 2223: "rockwell-csp2", + 2224: "efi-mg", + 2225: "rcip-itu", + 2226: "di-drm", + 2227: "di-msg", + 2228: "ehome-ms", + 2229: "datalens", + 2230: "queueadm", + 2231: "wimaxasncp", + 2232: "ivs-video", + 2233: "infocrypt", + 2234: "directplay", + 2235: "sercomm-wlink", + 2236: "nani", + 2237: "optech-port1-lm", + 2238: "aviva-sna", + 2239: "imagequery", + 2240: "recipe", + 2241: "ivsd", + 2242: "foliocorp", + 2243: "magicom", + 2244: "nmsserver", + 2245: "hao", + 2246: "pc-mta-addrmap", + 2247: "antidotemgrsvr", + 2248: "ums", + 2249: "rfmp", + 2250: "remote-collab", + 2251: "dif-port", + 2252: "njenet-ssl", + 2253: "dtv-chan-req", + 2254: "seispoc", + 2255: "vrtp", + 2256: "pcc-mfp", + 2257: "simple-tx-rx", + 2258: "rcts", + 2260: "apc-2260", + 2261: "comotionmaster", + 2262: "comotionback", + 2263: "ecwcfg", + 2264: "apx500api-1", + 2265: "apx500api-2", + 2266: "mfserver", + 2267: "ontobroker", + 2268: "amt", + 2269: "mikey", + 2270: "starschool", + 2271: "mmcals", + 2272: "mmcal", + 2273: "mysql-im", + 2274: "pcttunnell", + 2275: "ibridge-data", + 2276: "ibridge-mgmt", + 2277: "bluectrlproxy", + 2278: "s3db", + 2279: "xmquery", + 2280: "lnvpoller", + 2281: "lnvconsole", + 2282: "lnvalarm", + 2283: "lnvstatus", + 2284: "lnvmaps", + 2285: "lnvmailmon", + 2286: "nas-metering", + 2287: "dna", + 2288: "netml", + 2289: "dict-lookup", + 2290: "sonus-logging", + 2291: "eapsp", + 2292: "mib-streaming", + 2293: "npdbgmngr", + 2294: "konshus-lm", + 2295: "advant-lm", + 2296: "theta-lm", + 2297: "d2k-datamover1", + 2298: "d2k-datamover2", + 2299: "pc-telecommute", + 2300: "cvmmon", + 2301: "cpq-wbem", + 2302: "binderysupport", + 2303: "proxy-gateway", + 2304: "attachmate-uts", + 2305: "mt-scaleserver", + 2306: "tappi-boxnet", + 2307: "pehelp", + 2308: "sdhelp", + 2309: "sdserver", + 2310: "sdclient", + 2311: "messageservice", + 2312: "wanscaler", + 2313: "iapp", + 2314: "cr-websystems", + 2315: "precise-sft", + 2316: "sent-lm", + 2317: "attachmate-g32", + 2318: "cadencecontrol", + 
2319: "infolibria", + 2320: "siebel-ns", + 2321: "rdlap", + 2322: "ofsd", + 2323: "3d-nfsd", + 2324: "cosmocall", + 2325: "ansysli", + 2326: "idcp", + 2327: "xingcsm", + 2328: "netrix-sftm", + 2329: "nvd", + 2330: "tscchat", + 2331: "agentview", + 2332: "rcc-host", + 2333: "snapp", + 2334: "ace-client", + 2335: "ace-proxy", + 2336: "appleugcontrol", + 2337: "ideesrv", + 2338: "norton-lambert", + 2339: "3com-webview", + 2340: "wrs-registry", + 2341: "xiostatus", + 2342: "manage-exec", + 2343: "nati-logos", + 2344: "fcmsys", + 2345: "dbm", + 2346: "redstorm-join", + 2347: "redstorm-find", + 2348: "redstorm-info", + 2349: "redstorm-diag", + 2350: "psbserver", + 2351: "psrserver", + 2352: "pslserver", + 2353: "pspserver", + 2354: "psprserver", + 2355: "psdbserver", + 2356: "gxtelmd", + 2357: "unihub-server", + 2358: "futrix", + 2359: "flukeserver", + 2360: "nexstorindltd", + 2361: "tl1", + 2362: "digiman", + 2363: "mediacntrlnfsd", + 2364: "oi-2000", + 2365: "dbref", + 2366: "qip-login", + 2367: "service-ctrl", + 2368: "opentable", + 2370: "l3-hbmon", + 2371: "hp-rda", + 2372: "lanmessenger", + 2373: "remographlm", + 2374: "hydra", + 2375: "docker", + 2376: "docker-s", + 2377: "swarm", + 2379: "etcd-client", + 2380: "etcd-server", + 2381: "compaq-https", + 2382: "ms-olap3", + 2383: "ms-olap4", + 2384: "sd-request", + 2385: "sd-data", + 2386: "virtualtape", + 2387: "vsamredirector", + 2388: "mynahautostart", + 2389: "ovsessionmgr", + 2390: "rsmtp", + 2391: "3com-net-mgmt", + 2392: "tacticalauth", + 2393: "ms-olap1", + 2394: "ms-olap2", + 2395: "lan900-remote", + 2396: "wusage", + 2397: "ncl", + 2398: "orbiter", + 2399: "fmpro-fdal", + 2400: "opequus-server", + 2401: "cvspserver", + 2402: "taskmaster2000", + 2403: "taskmaster2000", + 2404: "iec-104", + 2405: "trc-netpoll", + 2406: "jediserver", + 2407: "orion", + 2408: "railgun-webaccl", + 2409: "sns-protocol", + 2410: "vrts-registry", + 2411: "netwave-ap-mgmt", + 2412: "cdn", + 2413: "orion-rmi-reg", + 2414: "beeyond", + 2415: "codima-rtp", + 2416: "rmtserver", + 2417: "composit-server", + 2418: "cas", + 2419: "attachmate-s2s", + 2420: "dslremote-mgmt", + 2421: "g-talk", + 2422: "crmsbits", + 2423: "rnrp", + 2424: "kofax-svr", + 2425: "fjitsuappmgr", + 2426: "vcmp", + 2427: "mgcp-gateway", + 2428: "ott", + 2429: "ft-role", + 2430: "venus", + 2431: "venus-se", + 2432: "codasrv", + 2433: "codasrv-se", + 2434: "pxc-epmap", + 2435: "optilogic", + 2436: "topx", + 2437: "unicontrol", + 2438: "msp", + 2439: "sybasedbsynch", + 2440: "spearway", + 2441: "pvsw-inet", + 2442: "netangel", + 2443: "powerclientcsf", + 2444: "btpp2sectrans", + 2445: "dtn1", + 2446: "bues-service", + 2447: "ovwdb", + 2448: "hpppssvr", + 2449: "ratl", + 2450: "netadmin", + 2451: "netchat", + 2452: "snifferclient", + 2453: "madge-ltd", + 2454: "indx-dds", + 2455: "wago-io-system", + 2456: "altav-remmgt", + 2457: "rapido-ip", + 2458: "griffin", + 2459: "community", + 2460: "ms-theater", + 2461: "qadmifoper", + 2462: "qadmifevent", + 2463: "lsi-raid-mgmt", + 2464: "direcpc-si", + 2465: "lbm", + 2466: "lbf", + 2467: "high-criteria", + 2468: "qip-msgd", + 2469: "mti-tcs-comm", + 2470: "taskman-port", + 2471: "seaodbc", + 2472: "c3", + 2473: "aker-cdp", + 2474: "vitalanalysis", + 2475: "ace-server", + 2476: "ace-svr-prop", + 2477: "ssm-cvs", + 2478: "ssm-cssps", + 2479: "ssm-els", + 2480: "powerexchange", + 2481: "giop", + 2482: "giop-ssl", + 2483: "ttc", + 2484: "ttc-ssl", + 2485: "netobjects1", + 2486: "netobjects2", + 2487: "pns", + 2488: "moy-corp", + 2489: "tsilb", + 2490: 
"qip-qdhcp", + 2491: "conclave-cpp", + 2492: "groove", + 2493: "talarian-mqs", + 2494: "bmc-ar", + 2495: "fast-rem-serv", + 2496: "dirgis", + 2497: "quaddb", + 2498: "odn-castraq", + 2499: "unicontrol", + 2500: "rtsserv", + 2501: "rtsclient", + 2502: "kentrox-prot", + 2503: "nms-dpnss", + 2504: "wlbs", + 2505: "ppcontrol", + 2506: "jbroker", + 2507: "spock", + 2508: "jdatastore", + 2509: "fjmpss", + 2510: "fjappmgrbulk", + 2511: "metastorm", + 2512: "citrixima", + 2513: "citrixadmin", + 2514: "facsys-ntp", + 2515: "facsys-router", + 2516: "maincontrol", + 2517: "call-sig-trans", + 2518: "willy", + 2519: "globmsgsvc", + 2520: "pvsw", + 2521: "adaptecmgr", + 2522: "windb", + 2523: "qke-llc-v3", + 2524: "optiwave-lm", + 2525: "ms-v-worlds", + 2526: "ema-sent-lm", + 2527: "iqserver", + 2528: "ncr-ccl", + 2529: "utsftp", + 2530: "vrcommerce", + 2531: "ito-e-gui", + 2532: "ovtopmd", + 2533: "snifferserver", + 2534: "combox-web-acc", + 2535: "madcap", + 2536: "btpp2audctr1", + 2537: "upgrade", + 2538: "vnwk-prapi", + 2539: "vsiadmin", + 2540: "lonworks", + 2541: "lonworks2", + 2542: "udrawgraph", + 2543: "reftek", + 2544: "novell-zen", + 2545: "sis-emt", + 2546: "vytalvaultbrtp", + 2547: "vytalvaultvsmp", + 2548: "vytalvaultpipe", + 2549: "ipass", + 2550: "ads", + 2551: "isg-uda-server", + 2552: "call-logging", + 2553: "efidiningport", + 2554: "vcnet-link-v10", + 2555: "compaq-wcp", + 2556: "nicetec-nmsvc", + 2557: "nicetec-mgmt", + 2558: "pclemultimedia", + 2559: "lstp", + 2560: "labrat", + 2561: "mosaixcc", + 2562: "delibo", + 2563: "cti-redwood", + 2564: "hp-3000-telnet", + 2565: "coord-svr", + 2566: "pcs-pcw", + 2567: "clp", + 2568: "spamtrap", + 2569: "sonuscallsig", + 2570: "hs-port", + 2571: "cecsvc", + 2572: "ibp", + 2573: "trustestablish", + 2574: "blockade-bpsp", + 2575: "hl7", + 2576: "tclprodebugger", + 2577: "scipticslsrvr", + 2578: "rvs-isdn-dcp", + 2579: "mpfoncl", + 2580: "tributary", + 2581: "argis-te", + 2582: "argis-ds", + 2583: "mon", + 2584: "cyaserv", + 2585: "netx-server", + 2586: "netx-agent", + 2587: "masc", + 2588: "privilege", + 2589: "quartus-tcl", + 2590: "idotdist", + 2591: "maytagshuffle", + 2592: "netrek", + 2593: "mns-mail", + 2594: "dts", + 2595: "worldfusion1", + 2596: "worldfusion2", + 2597: "homesteadglory", + 2598: "citriximaclient", + 2599: "snapd", + 2600: "hpstgmgr", + 2601: "discp-client", + 2602: "discp-server", + 2603: "servicemeter", + 2604: "nsc-ccs", + 2605: "nsc-posa", + 2606: "netmon", + 2607: "connection", + 2608: "wag-service", + 2609: "system-monitor", + 2610: "versa-tek", + 2611: "lionhead", + 2612: "qpasa-agent", + 2613: "smntubootstrap", + 2614: "neveroffline", + 2615: "firepower", + 2616: "appswitch-emp", + 2617: "cmadmin", + 2618: "priority-e-com", + 2619: "bruce", + 2620: "lpsrecommender", + 2621: "miles-apart", + 2622: "metricadbc", + 2623: "lmdp", + 2624: "aria", + 2625: "blwnkl-port", + 2626: "gbjd816", + 2627: "moshebeeri", + 2628: "dict", + 2629: "sitaraserver", + 2630: "sitaramgmt", + 2631: "sitaradir", + 2632: "irdg-post", + 2633: "interintelli", + 2634: "pk-electronics", + 2635: "backburner", + 2636: "solve", + 2637: "imdocsvc", + 2638: "sybaseanywhere", + 2639: "aminet", + 2640: "ami-control", + 2641: "hdl-srv", + 2642: "tragic", + 2643: "gte-samp", + 2644: "travsoft-ipx-t", + 2645: "novell-ipx-cmd", + 2646: "and-lm", + 2647: "syncserver", + 2648: "upsnotifyprot", + 2649: "vpsipport", + 2650: "eristwoguns", + 2651: "ebinsite", + 2652: "interpathpanel", + 2653: "sonus", + 2654: "corel-vncadmin", + 2655: "unglue", + 2656: "kana", + 
2657: "sns-dispatcher", + 2658: "sns-admin", + 2659: "sns-query", + 2660: "gcmonitor", + 2661: "olhost", + 2662: "bintec-capi", + 2663: "bintec-tapi", + 2664: "patrol-mq-gm", + 2665: "patrol-mq-nm", + 2666: "extensis", + 2667: "alarm-clock-s", + 2668: "alarm-clock-c", + 2669: "toad", + 2670: "tve-announce", + 2671: "newlixreg", + 2672: "nhserver", + 2673: "firstcall42", + 2674: "ewnn", + 2675: "ttc-etap", + 2676: "simslink", + 2677: "gadgetgate1way", + 2678: "gadgetgate2way", + 2679: "syncserverssl", + 2680: "pxc-sapxom", + 2681: "mpnjsomb", + 2683: "ncdloadbalance", + 2684: "mpnjsosv", + 2685: "mpnjsocl", + 2686: "mpnjsomg", + 2687: "pq-lic-mgmt", + 2688: "md-cg-http", + 2689: "fastlynx", + 2690: "hp-nnm-data", + 2691: "itinternet", + 2692: "admins-lms", + 2694: "pwrsevent", + 2695: "vspread", + 2696: "unifyadmin", + 2697: "oce-snmp-trap", + 2698: "mck-ivpip", + 2699: "csoft-plusclnt", + 2700: "tqdata", + 2701: "sms-rcinfo", + 2702: "sms-xfer", + 2703: "sms-chat", + 2704: "sms-remctrl", + 2705: "sds-admin", + 2706: "ncdmirroring", + 2707: "emcsymapiport", + 2708: "banyan-net", + 2709: "supermon", + 2710: "sso-service", + 2711: "sso-control", + 2712: "aocp", + 2713: "raventbs", + 2714: "raventdm", + 2715: "hpstgmgr2", + 2716: "inova-ip-disco", + 2717: "pn-requester", + 2718: "pn-requester2", + 2719: "scan-change", + 2720: "wkars", + 2721: "smart-diagnose", + 2722: "proactivesrvr", + 2723: "watchdog-nt", + 2724: "qotps", + 2725: "msolap-ptp2", + 2726: "tams", + 2727: "mgcp-callagent", + 2728: "sqdr", + 2729: "tcim-control", + 2730: "nec-raidplus", + 2731: "fyre-messanger", + 2732: "g5m", + 2733: "signet-ctf", + 2734: "ccs-software", + 2735: "netiq-mc", + 2736: "radwiz-nms-srv", + 2737: "srp-feedback", + 2738: "ndl-tcp-ois-gw", + 2739: "tn-timing", + 2740: "alarm", + 2741: "tsb", + 2742: "tsb2", + 2743: "murx", + 2744: "honyaku", + 2745: "urbisnet", + 2746: "cpudpencap", + 2747: "fjippol-swrly", + 2748: "fjippol-polsvr", + 2749: "fjippol-cnsl", + 2750: "fjippol-port1", + 2751: "fjippol-port2", + 2752: "rsisysaccess", + 2753: "de-spot", + 2754: "apollo-cc", + 2755: "expresspay", + 2756: "simplement-tie", + 2757: "cnrp", + 2758: "apollo-status", + 2759: "apollo-gms", + 2760: "sabams", + 2761: "dicom-iscl", + 2762: "dicom-tls", + 2763: "desktop-dna", + 2764: "data-insurance", + 2765: "qip-audup", + 2766: "compaq-scp", + 2767: "uadtc", + 2768: "uacs", + 2769: "exce", + 2770: "veronica", + 2771: "vergencecm", + 2772: "auris", + 2773: "rbakcup1", + 2774: "rbakcup2", + 2775: "smpp", + 2776: "ridgeway1", + 2777: "ridgeway2", + 2778: "gwen-sonya", + 2779: "lbc-sync", + 2780: "lbc-control", + 2781: "whosells", + 2782: "everydayrc", + 2783: "aises", + 2784: "www-dev", + 2785: "aic-np", + 2786: "aic-oncrpc", + 2787: "piccolo", + 2788: "fryeserv", + 2789: "media-agent", + 2790: "plgproxy", + 2791: "mtport-regist", + 2792: "f5-globalsite", + 2793: "initlsmsad", + 2795: "livestats", + 2796: "ac-tech", + 2797: "esp-encap", + 2798: "tmesis-upshot", + 2799: "icon-discover", + 2800: "acc-raid", + 2801: "igcp", + 2802: "veritas-tcp1", + 2803: "btprjctrl", + 2804: "dvr-esm", + 2805: "wta-wsp-s", + 2806: "cspuni", + 2807: "cspmulti", + 2808: "j-lan-p", + 2809: "corbaloc", + 2810: "netsteward", + 2811: "gsiftp", + 2812: "atmtcp", + 2813: "llm-pass", + 2814: "llm-csv", + 2815: "lbc-measure", + 2816: "lbc-watchdog", + 2817: "nmsigport", + 2818: "rmlnk", + 2819: "fc-faultnotify", + 2820: "univision", + 2821: "vrts-at-port", + 2822: "ka0wuc", + 2823: "cqg-netlan", + 2824: "cqg-netlan-1", + 2826: "slc-systemlog", + 
2827: "slc-ctrlrloops", + 2828: "itm-lm", + 2829: "silkp1", + 2830: "silkp2", + 2831: "silkp3", + 2832: "silkp4", + 2833: "glishd", + 2834: "evtp", + 2835: "evtp-data", + 2836: "catalyst", + 2837: "repliweb", + 2838: "starbot", + 2839: "nmsigport", + 2840: "l3-exprt", + 2841: "l3-ranger", + 2842: "l3-hawk", + 2843: "pdnet", + 2844: "bpcp-poll", + 2845: "bpcp-trap", + 2846: "aimpp-hello", + 2847: "aimpp-port-req", + 2848: "amt-blc-port", + 2849: "fxp", + 2850: "metaconsole", + 2851: "webemshttp", + 2852: "bears-01", + 2853: "ispipes", + 2854: "infomover", + 2855: "msrp", + 2856: "cesdinv", + 2857: "simctlp", + 2858: "ecnp", + 2859: "activememory", + 2860: "dialpad-voice1", + 2861: "dialpad-voice2", + 2862: "ttg-protocol", + 2863: "sonardata", + 2864: "astromed-main", + 2865: "pit-vpn", + 2866: "iwlistener", + 2867: "esps-portal", + 2868: "npep-messaging", + 2869: "icslap", + 2870: "daishi", + 2871: "msi-selectplay", + 2872: "radix", + 2874: "dxmessagebase1", + 2875: "dxmessagebase2", + 2876: "sps-tunnel", + 2877: "bluelance", + 2878: "aap", + 2879: "ucentric-ds", + 2880: "synapse", + 2881: "ndsp", + 2882: "ndtp", + 2883: "ndnp", + 2884: "flashmsg", + 2885: "topflow", + 2886: "responselogic", + 2887: "aironetddp", + 2888: "spcsdlobby", + 2889: "rsom", + 2890: "cspclmulti", + 2891: "cinegrfx-elmd", + 2892: "snifferdata", + 2893: "vseconnector", + 2894: "abacus-remote", + 2895: "natuslink", + 2896: "ecovisiong6-1", + 2897: "citrix-rtmp", + 2898: "appliance-cfg", + 2899: "powergemplus", + 2900: "quicksuite", + 2901: "allstorcns", + 2902: "netaspi", + 2903: "suitcase", + 2904: "m2ua", + 2905: "m3ua", + 2906: "caller9", + 2907: "webmethods-b2b", + 2908: "mao", + 2909: "funk-dialout", + 2910: "tdaccess", + 2911: "blockade", + 2912: "epicon", + 2913: "boosterware", + 2914: "gamelobby", + 2915: "tksocket", + 2916: "elvin-server", + 2917: "elvin-client", + 2918: "kastenchasepad", + 2919: "roboer", + 2920: "roboeda", + 2921: "cesdcdman", + 2922: "cesdcdtrn", + 2923: "wta-wsp-wtp-s", + 2924: "precise-vip", + 2926: "mobile-file-dl", + 2927: "unimobilectrl", + 2928: "redstone-cpss", + 2929: "amx-webadmin", + 2930: "amx-weblinx", + 2931: "circle-x", + 2932: "incp", + 2933: "4-tieropmgw", + 2934: "4-tieropmcli", + 2935: "qtp", + 2936: "otpatch", + 2937: "pnaconsult-lm", + 2938: "sm-pas-1", + 2939: "sm-pas-2", + 2940: "sm-pas-3", + 2941: "sm-pas-4", + 2942: "sm-pas-5", + 2943: "ttnrepository", + 2944: "megaco-h248", + 2945: "h248-binary", + 2946: "fjsvmpor", + 2947: "gpsd", + 2948: "wap-push", + 2949: "wap-pushsecure", + 2950: "esip", + 2951: "ottp", + 2952: "mpfwsas", + 2953: "ovalarmsrv", + 2954: "ovalarmsrv-cmd", + 2955: "csnotify", + 2956: "ovrimosdbman", + 2957: "jmact5", + 2958: "jmact6", + 2959: "rmopagt", + 2960: "dfoxserver", + 2961: "boldsoft-lm", + 2962: "iph-policy-cli", + 2963: "iph-policy-adm", + 2964: "bullant-srap", + 2965: "bullant-rap", + 2966: "idp-infotrieve", + 2967: "ssc-agent", + 2968: "enpp", + 2969: "essp", + 2970: "index-net", + 2971: "netclip", + 2972: "pmsm-webrctl", + 2973: "svnetworks", + 2974: "signal", + 2975: "fjmpcm", + 2976: "cns-srv-port", + 2977: "ttc-etap-ns", + 2978: "ttc-etap-ds", + 2979: "h263-video", + 2980: "wimd", + 2981: "mylxamport", + 2982: "iwb-whiteboard", + 2983: "netplan", + 2984: "hpidsadmin", + 2985: "hpidsagent", + 2986: "stonefalls", + 2987: "identify", + 2988: "hippad", + 2989: "zarkov", + 2990: "boscap", + 2991: "wkstn-mon", + 2992: "avenyo", + 2993: "veritas-vis1", + 2994: "veritas-vis2", + 2995: "idrs", + 2996: "vsixml", + 2997: "rebol", + 2998: 
"realsecure", + 2999: "remoteware-un", + 3000: "hbci", + 3001: "origo-native", + 3002: "exlm-agent", + 3003: "cgms", + 3004: "csoftragent", + 3005: "geniuslm", + 3006: "ii-admin", + 3007: "lotusmtap", + 3008: "midnight-tech", + 3009: "pxc-ntfy", + 3010: "gw", + 3011: "trusted-web", + 3012: "twsdss", + 3013: "gilatskysurfer", + 3014: "broker-service", + 3015: "nati-dstp", + 3016: "notify-srvr", + 3017: "event-listener", + 3018: "srvc-registry", + 3019: "resource-mgr", + 3020: "cifs", + 3021: "agriserver", + 3022: "csregagent", + 3023: "magicnotes", + 3024: "nds-sso", + 3025: "arepa-raft", + 3026: "agri-gateway", + 3027: "LiebDevMgmt-C", + 3028: "LiebDevMgmt-DM", + 3029: "LiebDevMgmt-A", + 3030: "arepa-cas", + 3031: "eppc", + 3032: "redwood-chat", + 3033: "pdb", + 3034: "osmosis-aeea", + 3035: "fjsv-gssagt", + 3036: "hagel-dump", + 3037: "hp-san-mgmt", + 3038: "santak-ups", + 3039: "cogitate", + 3040: "tomato-springs", + 3041: "di-traceware", + 3042: "journee", + 3043: "brp", + 3044: "epp", + 3045: "responsenet", + 3046: "di-ase", + 3047: "hlserver", + 3048: "pctrader", + 3049: "nsws", + 3050: "gds-db", + 3051: "galaxy-server", + 3052: "apc-3052", + 3053: "dsom-server", + 3054: "amt-cnf-prot", + 3055: "policyserver", + 3056: "cdl-server", + 3057: "goahead-fldup", + 3058: "videobeans", + 3059: "qsoft", + 3060: "interserver", + 3061: "cautcpd", + 3062: "ncacn-ip-tcp", + 3063: "ncadg-ip-udp", + 3064: "rprt", + 3065: "slinterbase", + 3066: "netattachsdmp", + 3067: "fjhpjp", + 3068: "ls3bcast", + 3069: "ls3", + 3070: "mgxswitch", + 3071: "xplat-replicate", + 3072: "csd-monitor", + 3073: "vcrp", + 3074: "xbox", + 3075: "orbix-locator", + 3076: "orbix-config", + 3077: "orbix-loc-ssl", + 3078: "orbix-cfg-ssl", + 3079: "lv-frontpanel", + 3080: "stm-pproc", + 3081: "tl1-lv", + 3082: "tl1-raw", + 3083: "tl1-telnet", + 3084: "itm-mccs", + 3085: "pcihreq", + 3086: "jdl-dbkitchen", + 3087: "asoki-sma", + 3088: "xdtp", + 3089: "ptk-alink", + 3090: "stss", + 3091: "1ci-smcs", + 3093: "rapidmq-center", + 3094: "rapidmq-reg", + 3095: "panasas", + 3096: "ndl-aps", + 3098: "umm-port", + 3099: "chmd", + 3100: "opcon-xps", + 3101: "hp-pxpib", + 3102: "slslavemon", + 3103: "autocuesmi", + 3104: "autocuelog", + 3105: "cardbox", + 3106: "cardbox-http", + 3107: "business", + 3108: "geolocate", + 3109: "personnel", + 3110: "sim-control", + 3111: "wsynch", + 3112: "ksysguard", + 3113: "cs-auth-svr", + 3114: "ccmad", + 3115: "mctet-master", + 3116: "mctet-gateway", + 3117: "mctet-jserv", + 3118: "pkagent", + 3119: "d2000kernel", + 3120: "d2000webserver", + 3121: "pcmk-remote", + 3122: "vtr-emulator", + 3123: "edix", + 3124: "beacon-port", + 3125: "a13-an", + 3127: "ctx-bridge", + 3128: "ndl-aas", + 3129: "netport-id", + 3130: "icpv2", + 3131: "netbookmark", + 3132: "ms-rule-engine", + 3133: "prism-deploy", + 3134: "ecp", + 3135: "peerbook-port", + 3136: "grubd", + 3137: "rtnt-1", + 3138: "rtnt-2", + 3139: "incognitorv", + 3140: "ariliamulti", + 3141: "vmodem", + 3142: "rdc-wh-eos", + 3143: "seaview", + 3144: "tarantella", + 3145: "csi-lfap", + 3146: "bears-02", + 3147: "rfio", + 3148: "nm-game-admin", + 3149: "nm-game-server", + 3150: "nm-asses-admin", + 3151: "nm-assessor", + 3152: "feitianrockey", + 3153: "s8-client-port", + 3154: "ccmrmi", + 3155: "jpegmpeg", + 3156: "indura", + 3157: "e3consultants", + 3158: "stvp", + 3159: "navegaweb-port", + 3160: "tip-app-server", + 3161: "doc1lm", + 3162: "sflm", + 3163: "res-sap", + 3164: "imprs", + 3165: "newgenpay", + 3166: "sossecollector", + 3167: "nowcontact", + 3168: 
"poweronnud", + 3169: "serverview-as", + 3170: "serverview-asn", + 3171: "serverview-gf", + 3172: "serverview-rm", + 3173: "serverview-icc", + 3174: "armi-server", + 3175: "t1-e1-over-ip", + 3176: "ars-master", + 3177: "phonex-port", + 3178: "radclientport", + 3179: "h2gf-w-2m", + 3180: "mc-brk-srv", + 3181: "bmcpatrolagent", + 3182: "bmcpatrolrnvu", + 3183: "cops-tls", + 3184: "apogeex-port", + 3185: "smpppd", + 3186: "iiw-port", + 3187: "odi-port", + 3188: "brcm-comm-port", + 3189: "pcle-infex", + 3190: "csvr-proxy", + 3191: "csvr-sslproxy", + 3192: "firemonrcc", + 3193: "spandataport", + 3194: "magbind", + 3195: "ncu-1", + 3196: "ncu-2", + 3197: "embrace-dp-s", + 3198: "embrace-dp-c", + 3199: "dmod-workspace", + 3200: "tick-port", + 3201: "cpq-tasksmart", + 3202: "intraintra", + 3203: "netwatcher-mon", + 3204: "netwatcher-db", + 3205: "isns", + 3206: "ironmail", + 3207: "vx-auth-port", + 3208: "pfu-prcallback", + 3209: "netwkpathengine", + 3210: "flamenco-proxy", + 3211: "avsecuremgmt", + 3212: "surveyinst", + 3213: "neon24x7", + 3214: "jmq-daemon-1", + 3215: "jmq-daemon-2", + 3216: "ferrari-foam", + 3217: "unite", + 3218: "smartpackets", + 3219: "wms-messenger", + 3220: "xnm-ssl", + 3221: "xnm-clear-text", + 3222: "glbp", + 3223: "digivote", + 3224: "aes-discovery", + 3225: "fcip-port", + 3226: "isi-irp", + 3227: "dwnmshttp", + 3228: "dwmsgserver", + 3229: "global-cd-port", + 3230: "sftdst-port", + 3231: "vidigo", + 3232: "mdtp", + 3233: "whisker", + 3234: "alchemy", + 3235: "mdap-port", + 3236: "apparenet-ts", + 3237: "apparenet-tps", + 3238: "apparenet-as", + 3239: "apparenet-ui", + 3240: "triomotion", + 3241: "sysorb", + 3242: "sdp-id-port", + 3243: "timelot", + 3244: "onesaf", + 3245: "vieo-fe", + 3246: "dvt-system", + 3247: "dvt-data", + 3248: "procos-lm", + 3249: "ssp", + 3250: "hicp", + 3251: "sysscanner", + 3252: "dhe", + 3253: "pda-data", + 3254: "pda-sys", + 3255: "semaphore", + 3256: "cpqrpm-agent", + 3257: "cpqrpm-server", + 3258: "ivecon-port", + 3259: "epncdp2", + 3260: "iscsi-target", + 3261: "winshadow", + 3262: "necp", + 3263: "ecolor-imager", + 3264: "ccmail", + 3265: "altav-tunnel", + 3266: "ns-cfg-server", + 3267: "ibm-dial-out", + 3268: "msft-gc", + 3269: "msft-gc-ssl", + 3270: "verismart", + 3271: "csoft-prev", + 3272: "user-manager", + 3273: "sxmp", + 3274: "ordinox-server", + 3275: "samd", + 3276: "maxim-asics", + 3277: "awg-proxy", + 3278: "lkcmserver", + 3279: "admind", + 3280: "vs-server", + 3281: "sysopt", + 3282: "datusorb", + 3283: "Apple Remote Desktop (Net Assistant)", + 3284: "4talk", + 3285: "plato", + 3286: "e-net", + 3287: "directvdata", + 3288: "cops", + 3289: "enpc", + 3290: "caps-lm", + 3291: "sah-lm", + 3292: "cart-o-rama", + 3293: "fg-fps", + 3294: "fg-gip", + 3295: "dyniplookup", + 3296: "rib-slm", + 3297: "cytel-lm", + 3298: "deskview", + 3299: "pdrncs", + 3300: "ceph", + 3302: "mcs-fastmail", + 3303: "opsession-clnt", + 3304: "opsession-srvr", + 3305: "odette-ftp", + 3306: "mysql", + 3307: "opsession-prxy", + 3308: "tns-server", + 3309: "tns-adv", + 3310: "dyna-access", + 3311: "mcns-tel-ret", + 3312: "appman-server", + 3313: "uorb", + 3314: "uohost", + 3315: "cdid", + 3316: "aicc-cmi", + 3317: "vsaiport", + 3318: "ssrip", + 3319: "sdt-lmd", + 3320: "officelink2000", + 3321: "vnsstr", + 3326: "sftu", + 3327: "bbars", + 3328: "egptlm", + 3329: "hp-device-disc", + 3330: "mcs-calypsoicf", + 3331: "mcs-messaging", + 3332: "mcs-mailsvr", + 3333: "dec-notes", + 3334: "directv-web", + 3335: "directv-soft", + 3336: "directv-tick", + 3337: 
"directv-catlg", + 3338: "anet-b", + 3339: "anet-l", + 3340: "anet-m", + 3341: "anet-h", + 3342: "webtie", + 3343: "ms-cluster-net", + 3344: "bnt-manager", + 3345: "influence", + 3346: "trnsprntproxy", + 3347: "phoenix-rpc", + 3348: "pangolin-laser", + 3349: "chevinservices", + 3350: "findviatv", + 3351: "btrieve", + 3352: "ssql", + 3353: "fatpipe", + 3354: "suitjd", + 3355: "ordinox-dbase", + 3356: "upnotifyps", + 3357: "adtech-test", + 3358: "mpsysrmsvr", + 3359: "wg-netforce", + 3360: "kv-server", + 3361: "kv-agent", + 3362: "dj-ilm", + 3363: "nati-vi-server", + 3364: "creativeserver", + 3365: "contentserver", + 3366: "creativepartnr", + 3372: "tip2", + 3373: "lavenir-lm", + 3374: "cluster-disc", + 3375: "vsnm-agent", + 3376: "cdbroker", + 3377: "cogsys-lm", + 3378: "wsicopy", + 3379: "socorfs", + 3380: "sns-channels", + 3381: "geneous", + 3382: "fujitsu-neat", + 3383: "esp-lm", + 3384: "hp-clic", + 3385: "qnxnetman", + 3386: "gprs-data", + 3387: "backroomnet", + 3388: "cbserver", + 3389: "ms-wbt-server", + 3390: "dsc", + 3391: "savant", + 3392: "efi-lm", + 3393: "d2k-tapestry1", + 3394: "d2k-tapestry2", + 3395: "dyna-lm", + 3396: "printer-agent", + 3397: "cloanto-lm", + 3398: "mercantile", + 3399: "csms", + 3400: "csms2", + 3401: "filecast", + 3402: "fxaengine-net", + 3405: "nokia-ann-ch1", + 3406: "nokia-ann-ch2", + 3407: "ldap-admin", + 3408: "BESApi", + 3409: "networklens", + 3410: "networklenss", + 3411: "biolink-auth", + 3412: "xmlblaster", + 3413: "svnet", + 3414: "wip-port", + 3415: "bcinameservice", + 3416: "commandport", + 3417: "csvr", + 3418: "rnmap", + 3419: "softaudit", + 3420: "ifcp-port", + 3421: "bmap", + 3422: "rusb-sys-port", + 3423: "xtrm", + 3424: "xtrms", + 3425: "agps-port", + 3426: "arkivio", + 3427: "websphere-snmp", + 3428: "twcss", + 3429: "gcsp", + 3430: "ssdispatch", + 3431: "ndl-als", + 3432: "osdcp", + 3433: "opnet-smp", + 3434: "opencm", + 3435: "pacom", + 3436: "gc-config", + 3437: "autocueds", + 3438: "spiral-admin", + 3439: "hri-port", + 3440: "ans-console", + 3441: "connect-client", + 3442: "connect-server", + 3443: "ov-nnm-websrv", + 3444: "denali-server", + 3445: "monp", + 3446: "3comfaxrpc", + 3447: "directnet", + 3448: "dnc-port", + 3449: "hotu-chat", + 3450: "castorproxy", + 3451: "asam", + 3452: "sabp-signal", + 3453: "pscupd", + 3454: "mira", + 3455: "prsvp", + 3456: "vat", + 3457: "vat-control", + 3458: "d3winosfi", + 3459: "integral", + 3460: "edm-manager", + 3461: "edm-stager", + 3462: "edm-std-notify", + 3463: "edm-adm-notify", + 3464: "edm-mgr-sync", + 3465: "edm-mgr-cntrl", + 3466: "workflow", + 3467: "rcst", + 3468: "ttcmremotectrl", + 3469: "pluribus", + 3470: "jt400", + 3471: "jt400-ssl", + 3472: "jaugsremotec-1", + 3473: "jaugsremotec-2", + 3474: "ttntspauto", + 3475: "genisar-port", + 3476: "nppmp", + 3477: "ecomm", + 3478: "stun", + 3479: "twrpc", + 3480: "plethora", + 3481: "cleanerliverc", + 3482: "vulture", + 3483: "slim-devices", + 3484: "gbs-stp", + 3485: "celatalk", + 3486: "ifsf-hb-port", + 3487: "ltctcp", + 3488: "fs-rh-srv", + 3489: "dtp-dia", + 3490: "colubris", + 3491: "swr-port", + 3492: "tvdumtray-port", + 3493: "nut", + 3494: "ibm3494", + 3495: "seclayer-tcp", + 3496: "seclayer-tls", + 3497: "ipether232port", + 3498: "dashpas-port", + 3499: "sccip-media", + 3500: "rtmp-port", + 3501: "isoft-p2p", + 3502: "avinstalldisc", + 3503: "lsp-ping", + 3504: "ironstorm", + 3505: "ccmcomm", + 3506: "apc-3506", + 3507: "nesh-broker", + 3508: "interactionweb", + 3509: "vt-ssl", + 3510: "xss-port", + 3511: "webmail-2", + 3512: 
"aztec", + 3513: "arcpd", + 3514: "must-p2p", + 3515: "must-backplane", + 3516: "smartcard-port", + 3517: "802-11-iapp", + 3518: "artifact-msg", + 3519: "nvmsgd", + 3520: "galileolog", + 3521: "mc3ss", + 3522: "nssocketport", + 3523: "odeumservlink", + 3524: "ecmport", + 3525: "eisport", + 3526: "starquiz-port", + 3527: "beserver-msg-q", + 3528: "jboss-iiop", + 3529: "jboss-iiop-ssl", + 3530: "gf", + 3531: "joltid", + 3532: "raven-rmp", + 3533: "raven-rdp", + 3534: "urld-port", + 3535: "ms-la", + 3536: "snac", + 3537: "ni-visa-remote", + 3538: "ibm-diradm", + 3539: "ibm-diradm-ssl", + 3540: "pnrp-port", + 3541: "voispeed-port", + 3542: "hacl-monitor", + 3543: "qftest-lookup", + 3544: "teredo", + 3545: "camac", + 3547: "symantec-sim", + 3548: "interworld", + 3549: "tellumat-nms", + 3550: "ssmpp", + 3551: "apcupsd", + 3552: "taserver", + 3553: "rbr-discovery", + 3554: "questnotify", + 3555: "razor", + 3556: "sky-transport", + 3557: "personalos-001", + 3558: "mcp-port", + 3559: "cctv-port", + 3560: "iniserve-port", + 3561: "bmc-onekey", + 3562: "sdbproxy", + 3563: "watcomdebug", + 3564: "esimport", + 3565: "m2pa", + 3566: "quest-data-hub", + 3567: "dof-eps", + 3568: "dof-tunnel-sec", + 3569: "mbg-ctrl", + 3570: "mccwebsvr-port", + 3571: "megardsvr-port", + 3572: "megaregsvrport", + 3573: "tag-ups-1", + 3574: "dmaf-server", + 3575: "ccm-port", + 3576: "cmc-port", + 3577: "config-port", + 3578: "data-port", + 3579: "ttat3lb", + 3580: "nati-svrloc", + 3581: "kfxaclicensing", + 3582: "press", + 3583: "canex-watch", + 3584: "u-dbap", + 3585: "emprise-lls", + 3586: "emprise-lsc", + 3587: "p2pgroup", + 3588: "sentinel", + 3589: "isomair", + 3590: "wv-csp-sms", + 3591: "gtrack-server", + 3592: "gtrack-ne", + 3593: "bpmd", + 3594: "mediaspace", + 3595: "shareapp", + 3596: "iw-mmogame", + 3597: "a14", + 3598: "a15", + 3599: "quasar-server", + 3600: "trap-daemon", + 3601: "visinet-gui", + 3602: "infiniswitchcl", + 3603: "int-rcv-cntrl", + 3604: "bmc-jmx-port", + 3605: "comcam-io", + 3606: "splitlock", + 3607: "precise-i3", + 3608: "trendchip-dcp", + 3609: "cpdi-pidas-cm", + 3610: "echonet", + 3611: "six-degrees", + 3612: "hp-dataprotect", + 3613: "alaris-disc", + 3614: "sigma-port", + 3615: "start-network", + 3616: "cd3o-protocol", + 3617: "sharp-server", + 3618: "aairnet-1", + 3619: "aairnet-2", + 3620: "ep-pcp", + 3621: "ep-nsp", + 3622: "ff-lr-port", + 3623: "haipe-discover", + 3624: "dist-upgrade", + 3625: "volley", + 3626: "bvcdaemon-port", + 3627: "jamserverport", + 3628: "ept-machine", + 3629: "escvpnet", + 3630: "cs-remote-db", + 3631: "cs-services", + 3632: "distcc", + 3633: "wacp", + 3634: "hlibmgr", + 3635: "sdo", + 3636: "servistaitsm", + 3637: "scservp", + 3638: "ehp-backup", + 3639: "xap-ha", + 3640: "netplay-port1", + 3641: "netplay-port2", + 3642: "juxml-port", + 3643: "audiojuggler", + 3644: "ssowatch", + 3645: "cyc", + 3646: "xss-srv-port", + 3647: "splitlock-gw", + 3648: "fjcp", + 3649: "nmmp", + 3650: "prismiq-plugin", + 3651: "xrpc-registry", + 3652: "vxcrnbuport", + 3653: "tsp", + 3654: "vaprtm", + 3655: "abatemgr", + 3656: "abatjss", + 3657: "immedianet-bcn", + 3658: "ps-ams", + 3659: "apple-sasl", + 3660: "can-nds-ssl", + 3661: "can-ferret-ssl", + 3662: "pserver", + 3663: "dtp", + 3664: "ups-engine", + 3665: "ent-engine", + 3666: "eserver-pap", + 3667: "infoexch", + 3668: "dell-rm-port", + 3669: "casanswmgmt", + 3670: "smile", + 3671: "efcp", + 3672: "lispworks-orb", + 3673: "mediavault-gui", + 3674: "wininstall-ipc", + 3675: "calltrax", + 3676: "va-pacbase", + 3677: "roverlog", 
+ 3678: "ipr-dglt", + 3679: "Escale (Newton Dock)", + 3680: "npds-tracker", + 3681: "bts-x73", + 3682: "cas-mapi", + 3683: "bmc-ea", + 3684: "faxstfx-port", + 3685: "dsx-agent", + 3686: "tnmpv2", + 3687: "simple-push", + 3688: "simple-push-s", + 3689: "daap", + 3690: "svn", + 3691: "magaya-network", + 3692: "intelsync", + 3693: "easl", + 3695: "bmc-data-coll", + 3696: "telnetcpcd", + 3697: "nw-license", + 3698: "sagectlpanel", + 3699: "kpn-icw", + 3700: "lrs-paging", + 3701: "netcelera", + 3702: "ws-discovery", + 3703: "adobeserver-3", + 3704: "adobeserver-4", + 3705: "adobeserver-5", + 3706: "rt-event", + 3707: "rt-event-s", + 3708: "sun-as-iiops", + 3709: "ca-idms", + 3710: "portgate-auth", + 3711: "edb-server2", + 3712: "sentinel-ent", + 3713: "tftps", + 3714: "delos-dms", + 3715: "anoto-rendezv", + 3716: "wv-csp-sms-cir", + 3717: "wv-csp-udp-cir", + 3718: "opus-services", + 3719: "itelserverport", + 3720: "ufastro-instr", + 3721: "xsync", + 3722: "xserveraid", + 3723: "sychrond", + 3724: "blizwow", + 3725: "na-er-tip", + 3726: "array-manager", + 3727: "e-mdu", + 3728: "e-woa", + 3729: "fksp-audit", + 3730: "client-ctrl", + 3731: "smap", + 3732: "m-wnn", + 3733: "multip-msg", + 3734: "synel-data", + 3735: "pwdis", + 3736: "rs-rmi", + 3737: "xpanel", + 3738: "versatalk", + 3739: "launchbird-lm", + 3740: "heartbeat", + 3741: "wysdma", + 3742: "cst-port", + 3743: "ipcs-command", + 3744: "sasg", + 3745: "gw-call-port", + 3746: "linktest", + 3747: "linktest-s", + 3748: "webdata", + 3749: "cimtrak", + 3750: "cbos-ip-port", + 3751: "gprs-cube", + 3752: "vipremoteagent", + 3753: "nattyserver", + 3754: "timestenbroker", + 3755: "sas-remote-hlp", + 3756: "canon-capt", + 3757: "grf-port", + 3758: "apw-registry", + 3759: "exapt-lmgr", + 3760: "adtempusclient", + 3761: "gsakmp", + 3762: "gbs-smp", + 3763: "xo-wave", + 3764: "mni-prot-rout", + 3765: "rtraceroute", + 3766: "sitewatch-s", + 3767: "listmgr-port", + 3768: "rblcheckd", + 3769: "haipe-otnk", + 3770: "cindycollab", + 3771: "paging-port", + 3772: "ctp", + 3773: "ctdhercules", + 3774: "zicom", + 3775: "ispmmgr", + 3776: "dvcprov-port", + 3777: "jibe-eb", + 3778: "c-h-it-port", + 3779: "cognima", + 3780: "nnp", + 3781: "abcvoice-port", + 3782: "iso-tp0s", + 3783: "bim-pem", + 3784: "bfd-control", + 3785: "bfd-echo", + 3786: "upstriggervsw", + 3787: "fintrx", + 3788: "isrp-port", + 3789: "remotedeploy", + 3790: "quickbooksrds", + 3791: "tvnetworkvideo", + 3792: "sitewatch", + 3793: "dcsoftware", + 3794: "jaus", + 3795: "myblast", + 3796: "spw-dialer", + 3797: "idps", + 3798: "minilock", + 3799: "radius-dynauth", + 3800: "pwgpsi", + 3801: "ibm-mgr", + 3802: "vhd", + 3803: "soniqsync", + 3804: "iqnet-port", + 3805: "tcpdataserver", + 3806: "wsmlb", + 3807: "spugna", + 3808: "sun-as-iiops-ca", + 3809: "apocd", + 3810: "wlanauth", + 3811: "amp", + 3812: "neto-wol-server", + 3813: "rap-ip", + 3814: "neto-dcs", + 3815: "lansurveyorxml", + 3816: "sunlps-http", + 3817: "tapeware", + 3818: "crinis-hb", + 3819: "epl-slp", + 3820: "scp", + 3821: "pmcp", + 3822: "acp-discovery", + 3823: "acp-conduit", + 3824: "acp-policy", + 3825: "ffserver", + 3826: "warmux", + 3827: "netmpi", + 3828: "neteh", + 3829: "neteh-ext", + 3830: "cernsysmgmtagt", + 3831: "dvapps", + 3832: "xxnetserver", + 3833: "aipn-auth", + 3834: "spectardata", + 3835: "spectardb", + 3836: "markem-dcp", + 3837: "mkm-discovery", + 3838: "sos", + 3839: "amx-rms", + 3840: "flirtmitmir", + 3841: "shiprush-db-svr", + 3842: "nhci", + 3843: "quest-agent", + 3844: "rnm", + 3845: "v-one-spp", + 3846: 
"an-pcp", + 3847: "msfw-control", + 3848: "item", + 3849: "spw-dnspreload", + 3850: "qtms-bootstrap", + 3851: "spectraport", + 3852: "sse-app-config", + 3853: "sscan", + 3854: "stryker-com", + 3855: "opentrac", + 3856: "informer", + 3857: "trap-port", + 3858: "trap-port-mom", + 3859: "nav-port", + 3860: "sasp", + 3861: "winshadow-hd", + 3862: "giga-pocket", + 3863: "asap-tcp", + 3864: "asap-tcp-tls", + 3865: "xpl", + 3866: "dzdaemon", + 3867: "dzoglserver", + 3868: "diameter", + 3869: "ovsam-mgmt", + 3870: "ovsam-d-agent", + 3871: "avocent-adsap", + 3872: "oem-agent", + 3873: "fagordnc", + 3874: "sixxsconfig", + 3875: "pnbscada", + 3876: "dl-agent", + 3877: "xmpcr-interface", + 3878: "fotogcad", + 3879: "appss-lm", + 3880: "igrs", + 3881: "idac", + 3882: "msdts1", + 3883: "vrpn", + 3884: "softrack-meter", + 3885: "topflow-ssl", + 3886: "nei-management", + 3887: "ciphire-data", + 3888: "ciphire-serv", + 3889: "dandv-tester", + 3890: "ndsconnect", + 3891: "rtc-pm-port", + 3892: "pcc-image-port", + 3893: "cgi-starapi", + 3894: "syam-agent", + 3895: "syam-smc", + 3896: "sdo-tls", + 3897: "sdo-ssh", + 3898: "senip", + 3899: "itv-control", + 3900: "udt-os", + 3901: "nimsh", + 3902: "nimaux", + 3903: "charsetmgr", + 3904: "omnilink-port", + 3905: "mupdate", + 3906: "topovista-data", + 3907: "imoguia-port", + 3908: "hppronetman", + 3909: "surfcontrolcpa", + 3910: "prnrequest", + 3911: "prnstatus", + 3912: "gbmt-stars", + 3913: "listcrt-port", + 3914: "listcrt-port-2", + 3915: "agcat", + 3916: "wysdmc", + 3917: "aftmux", + 3918: "pktcablemmcops", + 3919: "hyperip", + 3920: "exasoftport1", + 3921: "herodotus-net", + 3922: "sor-update", + 3923: "symb-sb-port", + 3924: "mpl-gprs-port", + 3925: "zmp", + 3926: "winport", + 3927: "natdataservice", + 3928: "netboot-pxe", + 3929: "smauth-port", + 3930: "syam-webserver", + 3931: "msr-plugin-port", + 3932: "dyn-site", + 3933: "plbserve-port", + 3934: "sunfm-port", + 3935: "sdp-portmapper", + 3936: "mailprox", + 3937: "dvbservdsc", + 3938: "dbcontrol-agent", + 3939: "aamp", + 3940: "xecp-node", + 3941: "homeportal-web", + 3942: "srdp", + 3943: "tig", + 3944: "sops", + 3945: "emcads", + 3946: "backupedge", + 3947: "ccp", + 3948: "apdap", + 3949: "drip", + 3950: "namemunge", + 3951: "pwgippfax", + 3952: "i3-sessionmgr", + 3953: "xmlink-connect", + 3954: "adrep", + 3955: "p2pcommunity", + 3956: "gvcp", + 3957: "mqe-broker", + 3958: "mqe-agent", + 3959: "treehopper", + 3960: "bess", + 3961: "proaxess", + 3962: "sbi-agent", + 3963: "thrp", + 3964: "sasggprs", + 3965: "ati-ip-to-ncpe", + 3966: "bflckmgr", + 3967: "ppsms", + 3968: "ianywhere-dbns", + 3969: "landmarks", + 3970: "lanrevagent", + 3971: "lanrevserver", + 3972: "iconp", + 3973: "progistics", + 3974: "citysearch", + 3975: "airshot", + 3976: "opswagent", + 3977: "opswmanager", + 3978: "secure-cfg-svr", + 3979: "smwan", + 3980: "acms", + 3981: "starfish", + 3982: "eis", + 3983: "eisp", + 3984: "mapper-nodemgr", + 3985: "mapper-mapethd", + 3986: "mapper-ws-ethd", + 3987: "centerline", + 3988: "dcs-config", + 3989: "bv-queryengine", + 3990: "bv-is", + 3991: "bv-smcsrv", + 3992: "bv-ds", + 3993: "bv-agent", + 3995: "iss-mgmt-ssl", + 3996: "abcsoftware", + 3997: "agentsease-db", + 3998: "dnx", + 3999: "nvcnet", + 4000: "terabase", + 4001: "newoak", + 4002: "pxc-spvr-ft", + 4003: "pxc-splr-ft", + 4004: "pxc-roid", + 4005: "pxc-pin", + 4006: "pxc-spvr", + 4007: "pxc-splr", + 4008: "netcheque", + 4009: "chimera-hwm", + 4010: "samsung-unidex", + 4011: "altserviceboot", + 4012: "pda-gate", + 4013: "acl-manager", + 
4014: "taiclock", + 4015: "talarian-mcast1", + 4016: "talarian-mcast2", + 4017: "talarian-mcast3", + 4018: "talarian-mcast4", + 4019: "talarian-mcast5", + 4020: "trap", + 4021: "nexus-portal", + 4022: "dnox", + 4023: "esnm-zoning", + 4024: "tnp1-port", + 4025: "partimage", + 4026: "as-debug", + 4027: "bxp", + 4028: "dtserver-port", + 4029: "ip-qsig", + 4030: "jdmn-port", + 4031: "suucp", + 4032: "vrts-auth-port", + 4033: "sanavigator", + 4034: "ubxd", + 4035: "wap-push-http", + 4036: "wap-push-https", + 4037: "ravehd", + 4038: "fazzt-ptp", + 4039: "fazzt-admin", + 4040: "yo-main", + 4041: "houston", + 4042: "ldxp", + 4043: "nirp", + 4044: "ltp", + 4045: "npp", + 4046: "acp-proto", + 4047: "ctp-state", + 4049: "wafs", + 4050: "cisco-wafs", + 4051: "cppdp", + 4052: "interact", + 4053: "ccu-comm-1", + 4054: "ccu-comm-2", + 4055: "ccu-comm-3", + 4056: "lms", + 4057: "wfm", + 4058: "kingfisher", + 4059: "dlms-cosem", + 4060: "dsmeter-iatc", + 4061: "ice-location", + 4062: "ice-slocation", + 4063: "ice-router", + 4064: "ice-srouter", + 4065: "avanti-cdp", + 4066: "pmas", + 4067: "idp", + 4068: "ipfltbcst", + 4069: "minger", + 4070: "tripe", + 4071: "aibkup", + 4072: "zieto-sock", + 4073: "iRAPP", + 4074: "cequint-cityid", + 4075: "perimlan", + 4076: "seraph", + 4078: "cssp", + 4079: "santools", + 4080: "lorica-in", + 4081: "lorica-in-sec", + 4082: "lorica-out", + 4083: "lorica-out-sec", + 4085: "ezmessagesrv", + 4087: "applusservice", + 4088: "npsp", + 4089: "opencore", + 4090: "omasgport", + 4091: "ewinstaller", + 4092: "ewdgs", + 4093: "pvxpluscs", + 4094: "sysrqd", + 4095: "xtgui", + 4096: "bre", + 4097: "patrolview", + 4098: "drmsfsd", + 4099: "dpcp", + 4100: "igo-incognito", + 4101: "brlp-0", + 4102: "brlp-1", + 4103: "brlp-2", + 4104: "brlp-3", + 4105: "shofar", + 4106: "synchronite", + 4107: "j-ac", + 4108: "accel", + 4109: "izm", + 4110: "g2tag", + 4111: "xgrid", + 4112: "apple-vpns-rp", + 4113: "aipn-reg", + 4114: "jomamqmonitor", + 4115: "cds", + 4116: "smartcard-tls", + 4117: "hillrserv", + 4118: "netscript", + 4119: "assuria-slm", + 4120: "minirem", + 4121: "e-builder", + 4122: "fprams", + 4123: "z-wave", + 4124: "tigv2", + 4125: "opsview-envoy", + 4126: "ddrepl", + 4127: "unikeypro", + 4128: "nufw", + 4129: "nuauth", + 4130: "fronet", + 4131: "stars", + 4132: "nuts-dem", + 4133: "nuts-bootp", + 4134: "nifty-hmi", + 4135: "cl-db-attach", + 4136: "cl-db-request", + 4137: "cl-db-remote", + 4138: "nettest", + 4139: "thrtx", + 4140: "cedros-fds", + 4141: "oirtgsvc", + 4142: "oidocsvc", + 4143: "oidsr", + 4145: "vvr-control", + 4146: "tgcconnect", + 4147: "vrxpservman", + 4148: "hhb-handheld", + 4149: "agslb", + 4150: "PowerAlert-nsa", + 4151: "menandmice-noh", + 4152: "idig-mux", + 4153: "mbl-battd", + 4154: "atlinks", + 4155: "bzr", + 4156: "stat-results", + 4157: "stat-scanner", + 4158: "stat-cc", + 4159: "nss", + 4160: "jini-discovery", + 4161: "omscontact", + 4162: "omstopology", + 4163: "silverpeakpeer", + 4164: "silverpeakcomm", + 4165: "altcp", + 4166: "joost", + 4167: "ddgn", + 4168: "pslicser", + 4169: "iadt", + 4170: "d-cinema-csp", + 4171: "ml-svnet", + 4172: "pcoip", + 4174: "smcluster", + 4175: "bccp", + 4176: "tl-ipcproxy", + 4177: "wello", + 4178: "storman", + 4179: "MaxumSP", + 4180: "httpx", + 4181: "macbak", + 4182: "pcptcpservice", + 4183: "cyborgnet", + 4184: "universe-suite", + 4185: "wcpp", + 4186: "boxbackupstore", + 4187: "csc-proxy", + 4188: "vatata", + 4189: "pcep", + 4190: "sieve", + 4192: "azeti", + 4193: "pvxplusio", + 4197: "hctl", + 4199: "eims-admin", + 
4300: "corelccam", + 4301: "d-data", + 4302: "d-data-control", + 4303: "srcp", + 4304: "owserver", + 4305: "batman", + 4306: "pinghgl", + 4307: "trueconf", + 4308: "compx-lockview", + 4309: "dserver", + 4310: "mirrtex", + 4311: "p6ssmc", + 4312: "pscl-mgt", + 4313: "perrla", + 4314: "choiceview-agt", + 4316: "choiceview-clt", + 4320: "fdt-rcatp", + 4321: "rwhois", + 4322: "trim-event", + 4323: "trim-ice", + 4325: "geognosisman", + 4326: "geognosis", + 4327: "jaxer-web", + 4328: "jaxer-manager", + 4329: "publiqare-sync", + 4330: "dey-sapi", + 4331: "ktickets-rest", + 4333: "ahsp", + 4334: "netconf-ch-ssh", + 4335: "netconf-ch-tls", + 4336: "restconf-ch-tls", + 4340: "gaia", + 4341: "lisp-data", + 4342: "lisp-cons", + 4343: "unicall", + 4344: "vinainstall", + 4345: "m4-network-as", + 4346: "elanlm", + 4347: "lansurveyor", + 4348: "itose", + 4349: "fsportmap", + 4350: "net-device", + 4351: "plcy-net-svcs", + 4352: "pjlink", + 4353: "f5-iquery", + 4354: "qsnet-trans", + 4355: "qsnet-workst", + 4356: "qsnet-assist", + 4357: "qsnet-cond", + 4358: "qsnet-nucl", + 4359: "omabcastltkm", + 4360: "matrix-vnet", + 4368: "wxbrief", + 4369: "epmd", + 4370: "elpro-tunnel", + 4371: "l2c-control", + 4372: "l2c-data", + 4373: "remctl", + 4374: "psi-ptt", + 4375: "tolteces", + 4376: "bip", + 4377: "cp-spxsvr", + 4378: "cp-spxdpy", + 4379: "ctdb", + 4389: "xandros-cms", + 4390: "wiegand", + 4391: "apwi-imserver", + 4392: "apwi-rxserver", + 4393: "apwi-rxspooler", + 4395: "omnivisionesx", + 4396: "fly", + 4400: "ds-srv", + 4401: "ds-srvr", + 4402: "ds-clnt", + 4403: "ds-user", + 4404: "ds-admin", + 4405: "ds-mail", + 4406: "ds-slp", + 4407: "nacagent", + 4408: "slscc", + 4409: "netcabinet-com", + 4410: "itwo-server", + 4411: "found", + 4413: "avi-nms", + 4414: "updog", + 4415: "brcd-vr-req", + 4416: "pjj-player", + 4417: "workflowdir", + 4419: "cbp", + 4420: "nvm-express", + 4421: "scaleft", + 4422: "tsepisp", + 4423: "thingkit", + 4425: "netrockey6", + 4426: "beacon-port-2", + 4427: "drizzle", + 4428: "omviserver", + 4429: "omviagent", + 4430: "rsqlserver", + 4431: "wspipe", + 4432: "l-acoustics", + 4433: "vop", + 4442: "saris", + 4443: "pharos", + 4444: "krb524", + 4445: "upnotifyp", + 4446: "n1-fwp", + 4447: "n1-rmgmt", + 4448: "asc-slmd", + 4449: "privatewire", + 4450: "camp", + 4451: "ctisystemmsg", + 4452: "ctiprogramload", + 4453: "nssalertmgr", + 4454: "nssagentmgr", + 4455: "prchat-user", + 4456: "prchat-server", + 4457: "prRegister", + 4458: "mcp", + 4484: "hpssmgmt", + 4485: "assyst-dr", + 4486: "icms", + 4487: "prex-tcp", + 4488: "awacs-ice", + 4500: "ipsec-nat-t", + 4535: "ehs", + 4536: "ehs-ssl", + 4537: "wssauthsvc", + 4538: "swx-gate", + 4545: "worldscores", + 4546: "sf-lm", + 4547: "lanner-lm", + 4548: "synchromesh", + 4549: "aegate", + 4550: "gds-adppiw-db", + 4551: "ieee-mih", + 4552: "menandmice-mon", + 4553: "icshostsvc", + 4554: "msfrs", + 4555: "rsip", + 4556: "dtn-bundle", + 4559: "hylafax", + 4563: "amahi-anywhere", + 4566: "kwtc", + 4567: "tram", + 4568: "bmc-reporting", + 4569: "iax", + 4570: "deploymentmap", + 4573: "cardifftec-back", + 4590: "rid", + 4591: "l3t-at-an", + 4593: "ipt-anri-anri", + 4594: "ias-session", + 4595: "ias-paging", + 4596: "ias-neighbor", + 4597: "a21-an-1xbs", + 4598: "a16-an-an", + 4599: "a17-an-an", + 4600: "piranha1", + 4601: "piranha2", + 4602: "mtsserver", + 4603: "menandmice-upg", + 4604: "irp", + 4605: "sixchat", + 4658: "playsta2-app", + 4659: "playsta2-lob", + 4660: "smaclmgr", + 4661: "kar2ouche", + 4662: "oms", + 4663: "noteit", + 4664: "ems", + 
4665: "contclientms", + 4666: "eportcomm", + 4667: "mmacomm", + 4668: "mmaeds", + 4669: "eportcommdata", + 4670: "light", + 4671: "acter", + 4672: "rfa", + 4673: "cxws", + 4674: "appiq-mgmt", + 4675: "dhct-status", + 4676: "dhct-alerts", + 4677: "bcs", + 4678: "traversal", + 4679: "mgesupervision", + 4680: "mgemanagement", + 4681: "parliant", + 4682: "finisar", + 4683: "spike", + 4684: "rfid-rp1", + 4685: "autopac", + 4686: "msp-os", + 4687: "nst", + 4688: "mobile-p2p", + 4689: "altovacentral", + 4690: "prelude", + 4691: "mtn", + 4692: "conspiracy", + 4700: "netxms-agent", + 4701: "netxms-mgmt", + 4702: "netxms-sync", + 4703: "npqes-test", + 4704: "assuria-ins", + 4711: "trinity-dist", + 4725: "truckstar", + 4727: "fcis", + 4728: "capmux", + 4730: "gearman", + 4731: "remcap", + 4733: "resorcs", + 4737: "ipdr-sp", + 4738: "solera-lpn", + 4739: "ipfix", + 4740: "ipfixs", + 4741: "lumimgrd", + 4742: "sicct", + 4743: "openhpid", + 4744: "ifsp", + 4745: "fmp", + 4749: "profilemac", + 4750: "ssad", + 4751: "spocp", + 4752: "snap", + 4753: "simon", + 4756: "RDCenter", + 4774: "converge", + 4784: "bfd-multi-ctl", + 4786: "smart-install", + 4787: "sia-ctrl-plane", + 4788: "xmcp", + 4800: "iims", + 4801: "iwec", + 4802: "ilss", + 4803: "notateit", + 4827: "htcp", + 4837: "varadero-0", + 4838: "varadero-1", + 4839: "varadero-2", + 4840: "opcua-tcp", + 4841: "quosa", + 4842: "gw-asv", + 4843: "opcua-tls", + 4844: "gw-log", + 4845: "wcr-remlib", + 4846: "contamac-icm", + 4847: "wfc", + 4848: "appserv-http", + 4849: "appserv-https", + 4850: "sun-as-nodeagt", + 4851: "derby-repli", + 4867: "unify-debug", + 4868: "phrelay", + 4869: "phrelaydbg", + 4870: "cc-tracking", + 4871: "wired", + 4876: "tritium-can", + 4877: "lmcs", + 4879: "wsdl-event", + 4880: "hislip", + 4883: "wmlserver", + 4884: "hivestor", + 4885: "abbs", + 4894: "lyskom", + 4899: "radmin-port", + 4900: "hfcs", + 4901: "flr-agent", + 4902: "magiccontrol", + 4912: "lutap", + 4913: "lutcp", + 4914: "bones", + 4915: "frcs", + 4940: "eq-office-4940", + 4941: "eq-office-4941", + 4942: "eq-office-4942", + 4949: "munin", + 4950: "sybasesrvmon", + 4951: "pwgwims", + 4952: "sagxtsds", + 4953: "dbsyncarbiter", + 4969: "ccss-qmm", + 4970: "ccss-qsm", + 4971: "burp", + 4984: "webyast", + 4985: "gerhcs", + 4986: "mrip", + 4987: "smar-se-port1", + 4988: "smar-se-port2", + 4989: "parallel", + 4990: "busycal", + 4991: "vrt", + 4999: "hfcs-manager", + 5000: "commplex-main", + 5001: "commplex-link", + 5002: "rfe", + 5003: "fmpro-internal", + 5004: "avt-profile-1", + 5005: "avt-profile-2", + 5006: "wsm-server", + 5007: "wsm-server-ssl", + 5008: "synapsis-edge", + 5009: "winfs", + 5010: "telelpathstart", + 5011: "telelpathattack", + 5012: "nsp", + 5013: "fmpro-v6", + 5015: "fmwp", + 5020: "zenginkyo-1", + 5021: "zenginkyo-2", + 5022: "mice", + 5023: "htuilsrv", + 5024: "scpi-telnet", + 5025: "scpi-raw", + 5026: "strexec-d", + 5027: "strexec-s", + 5028: "qvr", + 5029: "infobright", + 5030: "surfpass", + 5032: "signacert-agent", + 5033: "jtnetd-server", + 5034: "jtnetd-status", + 5042: "asnaacceler8db", + 5043: "swxadmin", + 5044: "lxi-evntsvc", + 5045: "osp", + 5048: "texai", + 5049: "ivocalize", + 5050: "mmcc", + 5051: "ita-agent", + 5052: "ita-manager", + 5053: "rlm", + 5054: "rlm-admin", + 5055: "unot", + 5056: "intecom-ps1", + 5057: "intecom-ps2", + 5059: "sds", + 5060: "sip", + 5061: "sips", + 5062: "na-localise", + 5063: "csrpc", + 5064: "ca-1", + 5065: "ca-2", + 5066: "stanag-5066", + 5067: "authentx", + 5068: "bitforestsrv", + 5069: "i-net-2000-npr", + 
5070: "vtsas", + 5071: "powerschool", + 5072: "ayiya", + 5073: "tag-pm", + 5074: "alesquery", + 5075: "pvaccess", + 5080: "onscreen", + 5081: "sdl-ets", + 5082: "qcp", + 5083: "qfp", + 5084: "llrp", + 5085: "encrypted-llrp", + 5086: "aprigo-cs", + 5087: "biotic", + 5093: "sentinel-lm", + 5094: "hart-ip", + 5099: "sentlm-srv2srv", + 5100: "socalia", + 5101: "talarian-tcp", + 5102: "oms-nonsecure", + 5103: "actifio-c2c", + 5106: "actifioudsagent", + 5107: "actifioreplic", + 5111: "taep-as-svc", + 5112: "pm-cmdsvr", + 5114: "ev-services", + 5115: "autobuild", + 5117: "gradecam", + 5120: "barracuda-bbs", + 5133: "nbt-pc", + 5134: "ppactivation", + 5135: "erp-scale", + 5137: "ctsd", + 5145: "rmonitor-secure", + 5146: "social-alarm", + 5150: "atmp", + 5151: "esri-sde", + 5152: "sde-discovery", + 5153: "toruxserver", + 5154: "bzflag", + 5155: "asctrl-agent", + 5156: "rugameonline", + 5157: "mediat", + 5161: "snmpssh", + 5162: "snmpssh-trap", + 5163: "sbackup", + 5164: "vpa", + 5165: "ife-icorp", + 5166: "winpcs", + 5167: "scte104", + 5168: "scte30", + 5172: "pcoip-mgmt", + 5190: "aol", + 5191: "aol-1", + 5192: "aol-2", + 5193: "aol-3", + 5194: "cpscomm", + 5195: "ampl-lic", + 5196: "ampl-tableproxy", + 5197: "tunstall-lwp", + 5200: "targus-getdata", + 5201: "targus-getdata1", + 5202: "targus-getdata2", + 5203: "targus-getdata3", + 5209: "nomad", + 5215: "noteza", + 5221: "3exmp", + 5222: "xmpp-client", + 5223: "hpvirtgrp", + 5224: "hpvirtctrl", + 5225: "hp-server", + 5226: "hp-status", + 5227: "perfd", + 5228: "hpvroom", + 5229: "jaxflow", + 5230: "jaxflow-data", + 5231: "crusecontrol", + 5232: "csedaemon", + 5233: "enfs", + 5234: "eenet", + 5235: "galaxy-network", + 5236: "padl2sim", + 5237: "mnet-discovery", + 5245: "downtools", + 5248: "caacws", + 5249: "caaclang2", + 5250: "soagateway", + 5251: "caevms", + 5252: "movaz-ssc", + 5253: "kpdp", + 5254: "logcabin", + 5264: "3com-njack-1", + 5265: "3com-njack-2", + 5269: "xmpp-server", + 5270: "cartographerxmp", + 5271: "cuelink", + 5272: "pk", + 5280: "xmpp-bosh", + 5281: "undo-lm", + 5282: "transmit-port", + 5298: "presence", + 5299: "nlg-data", + 5300: "hacl-hb", + 5301: "hacl-gs", + 5302: "hacl-cfg", + 5303: "hacl-probe", + 5304: "hacl-local", + 5305: "hacl-test", + 5306: "sun-mc-grp", + 5307: "sco-aip", + 5308: "cfengine", + 5309: "jprinter", + 5310: "outlaws", + 5312: "permabit-cs", + 5313: "rrdp", + 5314: "opalis-rbt-ipc", + 5315: "hacl-poll", + 5316: "hpbladems", + 5317: "hpdevms", + 5318: "pkix-cmc", + 5320: "bsfserver-zn", + 5321: "bsfsvr-zn-ssl", + 5343: "kfserver", + 5344: "xkotodrcp", + 5349: "stuns", + 5352: "dns-llq", + 5353: "mdns", + 5354: "mdnsresponder", + 5355: "llmnr", + 5356: "ms-smlbiz", + 5357: "wsdapi", + 5358: "wsdapi-s", + 5359: "ms-alerter", + 5360: "ms-sideshow", + 5361: "ms-s-sideshow", + 5362: "serverwsd2", + 5363: "net-projection", + 5397: "stresstester", + 5398: "elektron-admin", + 5399: "securitychase", + 5400: "excerpt", + 5401: "excerpts", + 5402: "mftp", + 5403: "hpoms-ci-lstn", + 5404: "hpoms-dps-lstn", + 5405: "netsupport", + 5406: "systemics-sox", + 5407: "foresyte-clear", + 5408: "foresyte-sec", + 5409: "salient-dtasrv", + 5410: "salient-usrmgr", + 5411: "actnet", + 5412: "continuus", + 5413: "wwiotalk", + 5414: "statusd", + 5415: "ns-server", + 5416: "sns-gateway", + 5417: "sns-agent", + 5418: "mcntp", + 5419: "dj-ice", + 5420: "cylink-c", + 5421: "netsupport2", + 5422: "salient-mux", + 5423: "virtualuser", + 5424: "beyond-remote", + 5425: "br-channel", + 5426: "devbasic", + 5427: "sco-peer-tta", + 5428: 
"telaconsole", + 5429: "base", + 5430: "radec-corp", + 5431: "park-agent", + 5432: "postgresql", + 5433: "pyrrho", + 5434: "sgi-arrayd", + 5435: "sceanics", + 5443: "spss", + 5445: "smbdirect", + 5450: "tiepie", + 5453: "surebox", + 5454: "apc-5454", + 5455: "apc-5455", + 5456: "apc-5456", + 5461: "silkmeter", + 5462: "ttl-publisher", + 5463: "ttlpriceproxy", + 5464: "quailnet", + 5465: "netops-broker", + 5470: "apsolab-col", + 5471: "apsolab-cols", + 5472: "apsolab-tag", + 5473: "apsolab-tags", + 5475: "apsolab-data", + 5500: "fcp-addr-srvr1", + 5501: "fcp-addr-srvr2", + 5502: "fcp-srvr-inst1", + 5503: "fcp-srvr-inst2", + 5504: "fcp-cics-gw1", + 5505: "checkoutdb", + 5506: "amc", + 5507: "psl-management", + 5550: "cbus", + 5553: "sgi-eventmond", + 5554: "sgi-esphttp", + 5555: "personal-agent", + 5556: "freeciv", + 5557: "farenet", + 5565: "hpe-dp-bura", + 5566: "westec-connect", + 5567: "dof-dps-mc-sec", + 5568: "sdt", + 5569: "rdmnet-ctrl", + 5573: "sdmmp", + 5574: "lsi-bobcat", + 5575: "ora-oap", + 5579: "fdtracks", + 5580: "tmosms0", + 5581: "tmosms1", + 5582: "fac-restore", + 5583: "tmo-icon-sync", + 5584: "bis-web", + 5585: "bis-sync", + 5586: "att-mt-sms", + 5597: "ininmessaging", + 5598: "mctfeed", + 5599: "esinstall", + 5600: "esmmanager", + 5601: "esmagent", + 5602: "a1-msc", + 5603: "a1-bs", + 5604: "a3-sdunode", + 5605: "a4-sdunode", + 5618: "efr", + 5627: "ninaf", + 5628: "htrust", + 5629: "symantec-sfdb", + 5630: "precise-comm", + 5631: "pcanywheredata", + 5632: "pcanywherestat", + 5633: "beorl", + 5634: "xprtld", + 5635: "sfmsso", + 5636: "sfm-db-server", + 5637: "cssc", + 5638: "flcrs", + 5639: "ics", + 5646: "vfmobile", + 5666: "nrpe", + 5670: "filemq", + 5671: "amqps", + 5672: "amqp", + 5673: "jms", + 5674: "hyperscsi-port", + 5675: "v5ua", + 5676: "raadmin", + 5677: "questdb2-lnchr", + 5678: "rrac", + 5679: "dccm", + 5680: "auriga-router", + 5681: "ncxcp", + 5688: "ggz", + 5689: "qmvideo", + 5693: "rbsystem", + 5696: "kmip", + 5700: "supportassist", + 5705: "storageos", + 5713: "proshareaudio", + 5714: "prosharevideo", + 5715: "prosharedata", + 5716: "prosharerequest", + 5717: "prosharenotify", + 5718: "dpm", + 5719: "dpm-agent", + 5720: "ms-licensing", + 5721: "dtpt", + 5722: "msdfsr", + 5723: "omhs", + 5724: "omsdk", + 5725: "ms-ilm", + 5726: "ms-ilm-sts", + 5727: "asgenf", + 5728: "io-dist-data", + 5729: "openmail", + 5730: "unieng", + 5741: "ida-discover1", + 5742: "ida-discover2", + 5743: "watchdoc-pod", + 5744: "watchdoc", + 5745: "fcopy-server", + 5746: "fcopys-server", + 5747: "tunatic", + 5748: "tunalyzer", + 5750: "rscd", + 5755: "openmailg", + 5757: "x500ms", + 5766: "openmailns", + 5767: "s-openmail", + 5768: "openmailpxy", + 5769: "spramsca", + 5770: "spramsd", + 5771: "netagent", + 5777: "dali-port", + 5780: "vts-rpc", + 5781: "3par-evts", + 5782: "3par-mgmt", + 5783: "3par-mgmt-ssl", + 5785: "3par-rcopy", + 5793: "xtreamx", + 5813: "icmpd", + 5814: "spt-automation", + 5841: "shiprush-d-ch", + 5842: "reversion", + 5859: "wherehoo", + 5863: "ppsuitemsg", + 5868: "diameters", + 5883: "jute", + 5900: "rfb", + 5910: "cm", + 5911: "cpdlc", + 5912: "fis", + 5913: "ads-c", + 5963: "indy", + 5968: "mppolicy-v5", + 5969: "mppolicy-mgr", + 5984: "couchdb", + 5985: "wsman", + 5986: "wsmans", + 5987: "wbem-rmi", + 5988: "wbem-http", + 5989: "wbem-https", + 5990: "wbem-exp-https", + 5991: "nuxsl", + 5992: "consul-insight", + 5993: "cim-rs", + 5999: "cvsup", + 6064: "ndl-ahp-svc", + 6065: "winpharaoh", + 6066: "ewctsp", + 6068: "gsmp-ancp", + 6069: "trip", + 6070: 
"messageasap", + 6071: "ssdtp", + 6072: "diagnose-proc", + 6073: "directplay8", + 6074: "max", + 6075: "dpm-acm", + 6076: "msft-dpm-cert", + 6077: "iconstructsrv", + 6084: "reload-config", + 6085: "konspire2b", + 6086: "pdtp", + 6087: "ldss", + 6088: "doglms", + 6099: "raxa-mgmt", + 6100: "synchronet-db", + 6101: "synchronet-rtc", + 6102: "synchronet-upd", + 6103: "rets", + 6104: "dbdb", + 6105: "primaserver", + 6106: "mpsserver", + 6107: "etc-control", + 6108: "sercomm-scadmin", + 6109: "globecast-id", + 6110: "softcm", + 6111: "spc", + 6112: "dtspcd", + 6113: "dayliteserver", + 6114: "wrspice", + 6115: "xic", + 6116: "xtlserv", + 6117: "daylitetouch", + 6121: "spdy", + 6122: "bex-webadmin", + 6123: "backup-express", + 6124: "pnbs", + 6130: "damewaremobgtwy", + 6133: "nbt-wol", + 6140: "pulsonixnls", + 6141: "meta-corp", + 6142: "aspentec-lm", + 6143: "watershed-lm", + 6144: "statsci1-lm", + 6145: "statsci2-lm", + 6146: "lonewolf-lm", + 6147: "montage-lm", + 6148: "ricardo-lm", + 6149: "tal-pod", + 6159: "efb-aci", + 6160: "ecmp", + 6161: "patrol-ism", + 6162: "patrol-coll", + 6163: "pscribe", + 6200: "lm-x", + 6209: "qmtps", + 6222: "radmind", + 6241: "jeol-nsdtp-1", + 6242: "jeol-nsdtp-2", + 6243: "jeol-nsdtp-3", + 6244: "jeol-nsdtp-4", + 6251: "tl1-raw-ssl", + 6252: "tl1-ssh", + 6253: "crip", + 6267: "gld", + 6268: "grid", + 6269: "grid-alt", + 6300: "bmc-grx", + 6301: "bmc-ctd-ldap", + 6306: "ufmp", + 6315: "scup", + 6316: "abb-escp", + 6317: "nav-data-cmd", + 6320: "repsvc", + 6321: "emp-server1", + 6322: "emp-server2", + 6324: "hrd-ncs", + 6325: "dt-mgmtsvc", + 6326: "dt-vra", + 6343: "sflow", + 6344: "streletz", + 6346: "gnutella-svc", + 6347: "gnutella-rtr", + 6350: "adap", + 6355: "pmcs", + 6360: "metaedit-mu", + 6370: "metaedit-se", + 6379: "redis", + 6382: "metatude-mds", + 6389: "clariion-evr01", + 6390: "metaedit-ws", + 6417: "faxcomservice", + 6418: "syserverremote", + 6419: "svdrp", + 6420: "nim-vdrshell", + 6421: "nim-wan", + 6432: "pgbouncer", + 6442: "tarp", + 6443: "sun-sr-https", + 6444: "sge-qmaster", + 6445: "sge-execd", + 6446: "mysql-proxy", + 6455: "skip-cert-recv", + 6456: "skip-cert-send", + 6464: "ieee11073-20701", + 6471: "lvision-lm", + 6480: "sun-sr-http", + 6481: "servicetags", + 6482: "ldoms-mgmt", + 6483: "SunVTS-RMI", + 6484: "sun-sr-jms", + 6485: "sun-sr-iiop", + 6486: "sun-sr-iiops", + 6487: "sun-sr-iiop-aut", + 6488: "sun-sr-jmx", + 6489: "sun-sr-admin", + 6500: "boks", + 6501: "boks-servc", + 6502: "boks-servm", + 6503: "boks-clntd", + 6505: "badm-priv", + 6506: "badm-pub", + 6507: "bdir-priv", + 6508: "bdir-pub", + 6509: "mgcs-mfp-port", + 6510: "mcer-port", + 6513: "netconf-tls", + 6514: "syslog-tls", + 6515: "elipse-rec", + 6543: "lds-distrib", + 6544: "lds-dump", + 6547: "apc-6547", + 6548: "apc-6548", + 6549: "apc-6549", + 6550: "fg-sysupdate", + 6551: "sum", + 6558: "xdsxdm", + 6566: "sane-port", + 6568: "canit-store", + 6579: "affiliate", + 6580: "parsec-master", + 6581: "parsec-peer", + 6582: "parsec-game", + 6583: "joaJewelSuite", + 6600: "mshvlm", + 6601: "mstmg-sstp", + 6602: "wsscomfrmwk", + 6619: "odette-ftps", + 6620: "kftp-data", + 6621: "kftp", + 6622: "mcftp", + 6623: "ktelnet", + 6624: "datascaler-db", + 6625: "datascaler-ctl", + 6626: "wago-service", + 6627: "nexgen", + 6628: "afesc-mc", + 6629: "nexgen-aux", + 6632: "mxodbc-connect", + 6640: "ovsdb", + 6653: "openflow", + 6655: "pcs-sf-ui-man", + 6656: "emgmsg", + 6670: "vocaltec-gold", + 6671: "p4p-portal", + 6672: "vision-server", + 6673: "vision-elmd", + 6678: "vfbp", + 6679: 
"osaut", + 6687: "clever-ctrace", + 6688: "clever-tcpip", + 6689: "tsa", + 6690: "cleverdetect", + 6697: "ircs-u", + 6701: "kti-icad-srvr", + 6702: "e-design-net", + 6703: "e-design-web", + 6714: "ibprotocol", + 6715: "fibotrader-com", + 6716: "princity-agent", + 6767: "bmc-perf-agent", + 6768: "bmc-perf-mgrd", + 6769: "adi-gxp-srvprt", + 6770: "plysrv-http", + 6771: "plysrv-https", + 6777: "ntz-tracker", + 6778: "ntz-p2p-storage", + 6785: "dgpf-exchg", + 6786: "smc-jmx", + 6787: "smc-admin", + 6788: "smc-http", + 6789: "radg", + 6790: "hnmp", + 6791: "hnm", + 6801: "acnet", + 6817: "pentbox-sim", + 6831: "ambit-lm", + 6841: "netmo-default", + 6842: "netmo-http", + 6850: "iccrushmore", + 6868: "acctopus-cc", + 6888: "muse", + 6900: "rtimeviewer", + 6901: "jetstream", + 6935: "ethoscan", + 6936: "xsmsvc", + 6946: "bioserver", + 6951: "otlp", + 6961: "jmact3", + 6962: "jmevt2", + 6963: "swismgr1", + 6964: "swismgr2", + 6965: "swistrap", + 6966: "swispol", + 6969: "acmsoda", + 6970: "conductor", + 6997: "MobilitySrv", + 6998: "iatp-highpri", + 6999: "iatp-normalpri", + 7000: "afs3-fileserver", + 7001: "afs3-callback", + 7002: "afs3-prserver", + 7003: "afs3-vlserver", + 7004: "afs3-kaserver", + 7005: "afs3-volser", + 7006: "afs3-errors", + 7007: "afs3-bos", + 7008: "afs3-update", + 7009: "afs3-rmtsys", + 7010: "ups-onlinet", + 7011: "talon-disc", + 7012: "talon-engine", + 7013: "microtalon-dis", + 7014: "microtalon-com", + 7015: "talon-webserver", + 7016: "spg", + 7017: "grasp", + 7018: "fisa-svc", + 7019: "doceri-ctl", + 7020: "dpserve", + 7021: "dpserveadmin", + 7022: "ctdp", + 7023: "ct2nmcs", + 7024: "vmsvc", + 7025: "vmsvc-2", + 7030: "op-probe", + 7031: "iposplanet", + 7070: "arcp", + 7071: "iwg1", + 7073: "martalk", + 7080: "empowerid", + 7099: "lazy-ptop", + 7100: "font-service", + 7101: "elcn", + 7117: "rothaga", + 7121: "virprot-lm", + 7128: "scenidm", + 7129: "scenccs", + 7161: "cabsm-comm", + 7162: "caistoragemgr", + 7163: "cacsambroker", + 7164: "fsr", + 7165: "doc-server", + 7166: "aruba-server", + 7167: "casrmagent", + 7168: "cnckadserver", + 7169: "ccag-pib", + 7170: "nsrp", + 7171: "drm-production", + 7172: "metalbend", + 7173: "zsecure", + 7174: "clutild", + 7200: "fodms", + 7201: "dlip", + 7202: "pon-ictp", + 7215: "PS-Server", + 7216: "PS-Capture-Pro", + 7227: "ramp", + 7228: "citrixupp", + 7229: "citrixuppg", + 7236: "display", + 7237: "pads", + 7244: "frc-hicp", + 7262: "cnap", + 7272: "watchme-7272", + 7273: "oma-rlp", + 7274: "oma-rlp-s", + 7275: "oma-ulp", + 7276: "oma-ilp", + 7277: "oma-ilp-s", + 7278: "oma-dcdocbs", + 7279: "ctxlic", + 7280: "itactionserver1", + 7281: "itactionserver2", + 7282: "mzca-action", + 7283: "genstat", + 7365: "lcm-server", + 7391: "mindfilesys", + 7392: "mrssrendezvous", + 7393: "nfoldman", + 7394: "fse", + 7395: "winqedit", + 7397: "hexarc", + 7400: "rtps-discovery", + 7401: "rtps-dd-ut", + 7402: "rtps-dd-mt", + 7410: "ionixnetmon", + 7411: "daqstream", + 7421: "mtportmon", + 7426: "pmdmgr", + 7427: "oveadmgr", + 7428: "ovladmgr", + 7429: "opi-sock", + 7430: "xmpv7", + 7431: "pmd", + 7437: "faximum", + 7443: "oracleas-https", + 7471: "sttunnel", + 7473: "rise", + 7474: "neo4j", + 7478: "openit", + 7491: "telops-lmd", + 7500: "silhouette", + 7501: "ovbus", + 7508: "adcp", + 7509: "acplt", + 7510: "ovhpas", + 7511: "pafec-lm", + 7542: "saratoga", + 7543: "atul", + 7544: "nta-ds", + 7545: "nta-us", + 7546: "cfs", + 7547: "cwmp", + 7548: "tidp", + 7549: "nls-tl", + 7551: "controlone-con", + 7560: "sncp", + 7563: "cfw", + 7566: "vsi-omega", + 
7569: "dell-eql-asm", + 7570: "aries-kfinder", + 7574: "coherence", + 7588: "sun-lm", + 7606: "mipi-debug", + 7624: "indi", + 7626: "simco", + 7627: "soap-http", + 7628: "zen-pawn", + 7629: "xdas", + 7630: "hawk", + 7631: "tesla-sys-msg", + 7633: "pmdfmgt", + 7648: "cuseeme", + 7672: "imqstomp", + 7673: "imqstomps", + 7674: "imqtunnels", + 7675: "imqtunnel", + 7676: "imqbrokerd", + 7677: "sun-user-https", + 7680: "pando-pub", + 7683: "dmt", + 7687: "bolt", + 7689: "collaber", + 7697: "klio", + 7700: "em7-secom", + 7707: "sync-em7", + 7708: "scinet", + 7720: "medimageportal", + 7724: "nsdeepfreezectl", + 7725: "nitrogen", + 7726: "freezexservice", + 7727: "trident-data", + 7728: "osvr", + 7734: "smip", + 7738: "aiagent", + 7741: "scriptview", + 7742: "msss", + 7743: "sstp-1", + 7744: "raqmon-pdu", + 7747: "prgp", + 7775: "inetfs", + 7777: "cbt", + 7778: "interwise", + 7779: "vstat", + 7781: "accu-lmgr", + 7786: "minivend", + 7787: "popup-reminders", + 7789: "office-tools", + 7794: "q3ade", + 7797: "pnet-conn", + 7798: "pnet-enc", + 7799: "altbsdp", + 7800: "asr", + 7801: "ssp-client", + 7810: "rbt-wanopt", + 7845: "apc-7845", + 7846: "apc-7846", + 7847: "csoauth", + 7869: "mobileanalyzer", + 7870: "rbt-smc", + 7871: "mdm", + 7878: "owms", + 7880: "pss", + 7887: "ubroker", + 7900: "mevent", + 7901: "tnos-sp", + 7902: "tnos-dp", + 7903: "tnos-dps", + 7913: "qo-secure", + 7932: "t2-drm", + 7933: "t2-brm", + 7962: "generalsync", + 7967: "supercell", + 7979: "micromuse-ncps", + 7980: "quest-vista", + 7981: "sossd-collect", + 7982: "sossd-agent", + 7997: "pushns", + 7999: "irdmi2", + 8000: "irdmi", + 8001: "vcom-tunnel", + 8002: "teradataordbms", + 8003: "mcreport", + 8005: "mxi", + 8006: "wpl-analytics", + 8007: "warppipe", + 8008: "http-alt", + 8019: "qbdb", + 8020: "intu-ec-svcdisc", + 8021: "intu-ec-client", + 8022: "oa-system", + 8025: "ca-audit-da", + 8026: "ca-audit-ds", + 8032: "pro-ed", + 8033: "mindprint", + 8034: "vantronix-mgmt", + 8040: "ampify", + 8041: "enguity-xccetp", + 8042: "fs-agent", + 8043: "fs-server", + 8044: "fs-mgmt", + 8051: "rocrail", + 8052: "senomix01", + 8053: "senomix02", + 8054: "senomix03", + 8055: "senomix04", + 8056: "senomix05", + 8057: "senomix06", + 8058: "senomix07", + 8059: "senomix08", + 8066: "toad-bi-appsrvr", + 8067: "infi-async", + 8070: "ucs-isc", + 8074: "gadugadu", + 8077: "mles", + 8080: "http-alt", + 8081: "sunproxyadmin", + 8082: "us-cli", + 8083: "us-srv", + 8086: "d-s-n", + 8087: "simplifymedia", + 8088: "radan-http", + 8090: "opsmessaging", + 8091: "jamlink", + 8097: "sac", + 8100: "xprint-server", + 8101: "ldoms-migr", + 8102: "kz-migr", + 8115: "mtl8000-matrix", + 8116: "cp-cluster", + 8117: "purityrpc", + 8118: "privoxy", + 8121: "apollo-data", + 8122: "apollo-admin", + 8128: "paycash-online", + 8129: "paycash-wbp", + 8130: "indigo-vrmi", + 8131: "indigo-vbcp", + 8132: "dbabble", + 8140: "puppet", + 8148: "isdd", + 8153: "quantastor", + 8160: "patrol", + 8161: "patrol-snmp", + 8162: "lpar2rrd", + 8181: "intermapper", + 8182: "vmware-fdm", + 8183: "proremote", + 8184: "itach", + 8190: "gcp-rphy", + 8191: "limnerpressure", + 8192: "spytechphone", + 8194: "blp1", + 8195: "blp2", + 8199: "vvr-data", + 8200: "trivnet1", + 8201: "trivnet2", + 8204: "lm-perfworks", + 8205: "lm-instmgr", + 8206: "lm-dta", + 8207: "lm-sserver", + 8208: "lm-webwatcher", + 8230: "rexecj", + 8243: "synapse-nhttps", + 8270: "robot-remote", + 8276: "pando-sec", + 8280: "synapse-nhttp", + 8282: "libelle", + 8292: "blp3", + 8293: "hiperscan-id", + 8294: "blp4", + 8300: 
"tmi", + 8301: "amberon", + 8313: "hub-open-net", + 8320: "tnp-discover", + 8321: "tnp", + 8322: "garmin-marine", + 8351: "server-find", + 8376: "cruise-enum", + 8377: "cruise-swroute", + 8378: "cruise-config", + 8379: "cruise-diags", + 8380: "cruise-update", + 8383: "m2mservices", + 8400: "cvd", + 8401: "sabarsd", + 8402: "abarsd", + 8403: "admind", + 8404: "svcloud", + 8405: "svbackup", + 8415: "dlpx-sp", + 8416: "espeech", + 8417: "espeech-rtp", + 8423: "aritts", + 8442: "cybro-a-bus", + 8443: "pcsync-https", + 8444: "pcsync-http", + 8445: "copy", + 8450: "npmp", + 8457: "nexentamv", + 8470: "cisco-avp", + 8471: "pim-port", + 8472: "otv", + 8473: "vp2p", + 8474: "noteshare", + 8500: "fmtp", + 8501: "cmtp-mgt", + 8502: "ftnmtp", + 8554: "rtsp-alt", + 8555: "d-fence", + 8567: "dof-tunnel", + 8600: "asterix", + 8610: "canon-mfnp", + 8611: "canon-bjnp1", + 8612: "canon-bjnp2", + 8613: "canon-bjnp3", + 8614: "canon-bjnp4", + 8615: "imink", + 8665: "monetra", + 8666: "monetra-admin", + 8675: "msi-cps-rm", + 8686: "sun-as-jmxrmi", + 8688: "openremote-ctrl", + 8699: "vnyx", + 8711: "nvc", + 8733: "ibus", + 8750: "dey-keyneg", + 8763: "mc-appserver", + 8764: "openqueue", + 8765: "ultraseek-http", + 8766: "amcs", + 8770: "dpap", + 8778: "uec", + 8786: "msgclnt", + 8787: "msgsrvr", + 8793: "acd-pm", + 8800: "sunwebadmin", + 8804: "truecm", + 8873: "dxspider", + 8880: "cddbp-alt", + 8881: "galaxy4d", + 8883: "secure-mqtt", + 8888: "ddi-tcp-1", + 8889: "ddi-tcp-2", + 8890: "ddi-tcp-3", + 8891: "ddi-tcp-4", + 8892: "ddi-tcp-5", + 8893: "ddi-tcp-6", + 8894: "ddi-tcp-7", + 8899: "ospf-lite", + 8900: "jmb-cds1", + 8901: "jmb-cds2", + 8910: "manyone-http", + 8911: "manyone-xml", + 8912: "wcbackup", + 8913: "dragonfly", + 8937: "twds", + 8953: "ub-dns-control", + 8954: "cumulus-admin", + 8980: "nod-provider", + 8989: "sunwebadmins", + 8990: "http-wmap", + 8991: "https-wmap", + 8997: "oracle-ms-ens", + 8998: "canto-roboflow", + 8999: "bctp", + 9000: "cslistener", + 9001: "etlservicemgr", + 9002: "dynamid", + 9005: "golem", + 9008: "ogs-server", + 9009: "pichat", + 9010: "sdr", + 9020: "tambora", + 9021: "panagolin-ident", + 9022: "paragent", + 9023: "swa-1", + 9024: "swa-2", + 9025: "swa-3", + 9026: "swa-4", + 9050: "versiera", + 9051: "fio-cmgmt", + 9060: "CardWeb-IO", + 9080: "glrpc", + 9083: "emc-pp-mgmtsvc", + 9084: "aurora", + 9085: "ibm-rsyscon", + 9086: "net2display", + 9087: "classic", + 9088: "sqlexec", + 9089: "sqlexec-ssl", + 9090: "websm", + 9091: "xmltec-xmlmail", + 9092: "XmlIpcRegSvc", + 9093: "copycat", + 9100: "hp-pdl-datastr", + 9101: "bacula-dir", + 9102: "bacula-fd", + 9103: "bacula-sd", + 9104: "peerwire", + 9105: "xadmin", + 9106: "astergate", + 9107: "astergatefax", + 9119: "mxit", + 9122: "grcmp", + 9123: "grcp", + 9131: "dddp", + 9160: "apani1", + 9161: "apani2", + 9162: "apani3", + 9163: "apani4", + 9164: "apani5", + 9191: "sun-as-jpda", + 9200: "wap-wsp", + 9201: "wap-wsp-wtp", + 9202: "wap-wsp-s", + 9203: "wap-wsp-wtp-s", + 9204: "wap-vcard", + 9205: "wap-vcal", + 9206: "wap-vcard-s", + 9207: "wap-vcal-s", + 9208: "rjcdb-vcards", + 9209: "almobile-system", + 9210: "oma-mlp", + 9211: "oma-mlp-s", + 9212: "serverviewdbms", + 9213: "serverstart", + 9214: "ipdcesgbs", + 9215: "insis", + 9216: "acme", + 9217: "fsc-port", + 9222: "teamcoherence", + 9255: "mon", + 9278: "pegasus", + 9279: "pegasus-ctl", + 9280: "pgps", + 9281: "swtp-port1", + 9282: "swtp-port2", + 9283: "callwaveiam", + 9284: "visd", + 9285: "n2h2server", + 9287: "cumulus", + 9292: "armtechdaemon", + 9293: "storview", 
+ 9294: "armcenterhttp", + 9295: "armcenterhttps", + 9300: "vrace", + 9306: "sphinxql", + 9312: "sphinxapi", + 9318: "secure-ts", + 9321: "guibase", + 9343: "mpidcmgr", + 9344: "mphlpdmc", + 9345: "rancher", + 9346: "ctechlicensing", + 9374: "fjdmimgr", + 9380: "boxp", + 9387: "d2dconfig", + 9388: "d2ddatatrans", + 9389: "adws", + 9390: "otp", + 9396: "fjinvmgr", + 9397: "mpidcagt", + 9400: "sec-t4net-srv", + 9401: "sec-t4net-clt", + 9402: "sec-pc2fax-srv", + 9418: "git", + 9443: "tungsten-https", + 9444: "wso2esb-console", + 9445: "mindarray-ca", + 9450: "sntlkeyssrvr", + 9500: "ismserver", + 9535: "mngsuite", + 9536: "laes-bf", + 9555: "trispen-sra", + 9592: "ldgateway", + 9593: "cba8", + 9594: "msgsys", + 9595: "pds", + 9596: "mercury-disc", + 9597: "pd-admin", + 9598: "vscp", + 9599: "robix", + 9600: "micromuse-ncpw", + 9612: "streamcomm-ds", + 9614: "iadt-tls", + 9616: "erunbook-agent", + 9617: "erunbook-server", + 9618: "condor", + 9628: "odbcpathway", + 9629: "uniport", + 9630: "peoctlr", + 9631: "peocoll", + 9640: "pqsflows", + 9666: "zoomcp", + 9667: "xmms2", + 9668: "tec5-sdctp", + 9694: "client-wakeup", + 9695: "ccnx", + 9700: "board-roar", + 9747: "l5nas-parchan", + 9750: "board-voip", + 9753: "rasadv", + 9762: "tungsten-http", + 9800: "davsrc", + 9801: "sstp-2", + 9802: "davsrcs", + 9875: "sapv1", + 9876: "sd", + 9888: "cyborg-systems", + 9889: "gt-proxy", + 9898: "monkeycom", + 9900: "iua", + 9909: "domaintime", + 9911: "sype-transport", + 9925: "xybrid-cloud", + 9950: "apc-9950", + 9951: "apc-9951", + 9952: "apc-9952", + 9953: "acis", + 9954: "hinp", + 9955: "alljoyn-stm", + 9966: "odnsp", + 9978: "xybrid-rt", + 9979: "visweather", + 9981: "pumpkindb", + 9987: "dsm-scm-target", + 9988: "nsesrvr", + 9990: "osm-appsrvr", + 9991: "osm-oev", + 9992: "palace-1", + 9993: "palace-2", + 9994: "palace-3", + 9995: "palace-4", + 9996: "palace-5", + 9997: "palace-6", + 9998: "distinct32", + 9999: "distinct", + 10000: "ndmp", + 10001: "scp-config", + 10002: "documentum", + 10003: "documentum-s", + 10004: "emcrmirccd", + 10005: "emcrmird", + 10006: "netapp-sync", + 10007: "mvs-capacity", + 10008: "octopus", + 10009: "swdtp-sv", + 10010: "rxapi", + 10020: "abb-hw", + 10050: "zabbix-agent", + 10051: "zabbix-trapper", + 10055: "qptlmd", + 10080: "amanda", + 10081: "famdc", + 10100: "itap-ddtp", + 10101: "ezmeeting-2", + 10102: "ezproxy-2", + 10103: "ezrelay", + 10104: "swdtp", + 10107: "bctp-server", + 10110: "nmea-0183", + 10113: "netiq-endpoint", + 10114: "netiq-qcheck", + 10115: "netiq-endpt", + 10116: "netiq-voipa", + 10117: "iqrm", + 10125: "cimple", + 10128: "bmc-perf-sd", + 10129: "bmc-gms", + 10160: "qb-db-server", + 10161: "snmptls", + 10162: "snmptls-trap", + 10200: "trisoap", + 10201: "rsms", + 10252: "apollo-relay", + 10260: "axis-wimp-port", + 10261: "tile-ml", + 10288: "blocks", + 10321: "cosir", + 10540: "MOS-lower", + 10541: "MOS-upper", + 10542: "MOS-aux", + 10543: "MOS-soap", + 10544: "MOS-soap-opt", + 10548: "serverdocs", + 10631: "printopia", + 10800: "gap", + 10805: "lpdg", + 10809: "nbd", + 10860: "helix", + 10880: "bveapi", + 10933: "octopustentacle", + 10990: "rmiaux", + 11000: "irisa", + 11001: "metasys", + 11095: "weave", + 11103: "origo-sync", + 11104: "netapp-icmgmt", + 11105: "netapp-icdata", + 11106: "sgi-lk", + 11109: "sgi-dmfmgr", + 11110: "sgi-soap", + 11111: "vce", + 11112: "dicom", + 11161: "suncacao-snmp", + 11162: "suncacao-jmxmp", + 11163: "suncacao-rmi", + 11164: "suncacao-csa", + 11165: "suncacao-websvc", + 11172: "oemcacao-jmxmp", + 11173: 
"t5-straton", + 11174: "oemcacao-rmi", + 11175: "oemcacao-websvc", + 11201: "smsqp", + 11202: "dcsl-backup", + 11208: "wifree", + 11211: "memcache", + 11319: "imip", + 11320: "imip-channels", + 11321: "arena-server", + 11367: "atm-uhas", + 11371: "hkp", + 11489: "asgcypresstcps", + 11600: "tempest-port", + 11623: "emc-xsw-dconfig", + 11720: "h323callsigalt", + 11723: "emc-xsw-dcache", + 11751: "intrepid-ssl", + 11796: "lanschool", + 11876: "xoraya", + 11967: "sysinfo-sp", + 12000: "entextxid", + 12001: "entextnetwk", + 12002: "entexthigh", + 12003: "entextmed", + 12004: "entextlow", + 12005: "dbisamserver1", + 12006: "dbisamserver2", + 12007: "accuracer", + 12008: "accuracer-dbms", + 12010: "edbsrvr", + 12012: "vipera", + 12013: "vipera-ssl", + 12109: "rets-ssl", + 12121: "nupaper-ss", + 12168: "cawas", + 12172: "hivep", + 12300: "linogridengine", + 12302: "rads", + 12321: "warehouse-sss", + 12322: "warehouse", + 12345: "italk", + 12753: "tsaf", + 12865: "netperf", + 13160: "i-zipqd", + 13216: "bcslogc", + 13217: "rs-pias", + 13218: "emc-vcas-tcp", + 13223: "powwow-client", + 13224: "powwow-server", + 13400: "doip-data", + 13720: "bprd", + 13721: "bpdbm", + 13722: "bpjava-msvc", + 13724: "vnetd", + 13782: "bpcd", + 13783: "vopied", + 13785: "nbdb", + 13786: "nomdb", + 13818: "dsmcc-config", + 13819: "dsmcc-session", + 13820: "dsmcc-passthru", + 13821: "dsmcc-download", + 13822: "dsmcc-ccp", + 13823: "bmdss", + 13894: "ucontrol", + 13929: "dta-systems", + 13930: "medevolve", + 14000: "scotty-ft", + 14001: "sua", + 14033: "sage-best-com1", + 14034: "sage-best-com2", + 14141: "vcs-app", + 14142: "icpp", + 14143: "icpps", + 14145: "gcm-app", + 14149: "vrts-tdd", + 14150: "vcscmd", + 14154: "vad", + 14250: "cps", + 14414: "ca-web-update", + 14500: "xpra", + 14936: "hde-lcesrvr-1", + 14937: "hde-lcesrvr-2", + 15000: "hydap", + 15002: "onep-tls", + 15345: "xpilot", + 15363: "3link", + 15555: "cisco-snat", + 15660: "bex-xr", + 15740: "ptp", + 15999: "programmar", + 16000: "fmsas", + 16001: "fmsascon", + 16002: "gsms", + 16020: "jwpc", + 16021: "jwpc-bin", + 16161: "sun-sea-port", + 16162: "solaris-audit", + 16309: "etb4j", + 16310: "pduncs", + 16311: "pdefmns", + 16360: "netserialext1", + 16361: "netserialext2", + 16367: "netserialext3", + 16368: "netserialext4", + 16384: "connected", + 16385: "rdgs", + 16619: "xoms", + 16665: "axon-tunnel", + 16789: "cadsisvr", + 16900: "newbay-snc-mc", + 16950: "sgcip", + 16991: "intel-rci-mp", + 16992: "amt-soap-http", + 16993: "amt-soap-https", + 16994: "amt-redir-tcp", + 16995: "amt-redir-tls", + 17007: "isode-dua", + 17184: "vestasdlp", + 17185: "soundsvirtual", + 17219: "chipper", + 17220: "avtp", + 17221: "avdecc", + 17223: "isa100-gci", + 17225: "trdp-md", + 17234: "integrius-stp", + 17235: "ssh-mgmt", + 17500: "db-lsp", + 17555: "ailith", + 17729: "ea", + 17754: "zep", + 17755: "zigbee-ip", + 17756: "zigbee-ips", + 17777: "sw-orion", + 18000: "biimenu", + 18104: "radpdf", + 18136: "racf", + 18181: "opsec-cvp", + 18182: "opsec-ufp", + 18183: "opsec-sam", + 18184: "opsec-lea", + 18185: "opsec-omi", + 18186: "ohsc", + 18187: "opsec-ela", + 18241: "checkpoint-rtm", + 18242: "iclid", + 18243: "clusterxl", + 18262: "gv-pf", + 18463: "ac-cluster", + 18634: "rds-ib", + 18635: "rds-ip", + 18668: "vdmmesh", + 18769: "ique", + 18881: "infotos", + 18888: "apc-necmp", + 19000: "igrid", + 19007: "scintilla", + 19020: "j-link", + 19191: "opsec-uaa", + 19194: "ua-secureagent", + 19220: "cora", + 19283: "keysrvr", + 19315: "keyshadow", + 19398: "mtrgtrans", + 19410: 
"hp-sco", + 19411: "hp-sca", + 19412: "hp-sessmon", + 19539: "fxuptp", + 19540: "sxuptp", + 19541: "jcp", + 19998: "iec-104-sec", + 19999: "dnp-sec", + 20000: "dnp", + 20001: "microsan", + 20002: "commtact-http", + 20003: "commtact-https", + 20005: "openwebnet", + 20013: "ss-idi", + 20014: "opendeploy", + 20034: "nburn-id", + 20046: "tmophl7mts", + 20048: "mountd", + 20049: "nfsrdma", + 20057: "avesterra", + 20167: "tolfab", + 20202: "ipdtp-port", + 20222: "ipulse-ics", + 20480: "emwavemsg", + 20670: "track", + 20999: "athand-mmp", + 21000: "irtrans", + 21010: "notezilla-lan", + 21221: "aigairserver", + 21553: "rdm-tfs", + 21554: "dfserver", + 21590: "vofr-gateway", + 21800: "tvpm", + 21845: "webphone", + 21846: "netspeak-is", + 21847: "netspeak-cs", + 21848: "netspeak-acd", + 21849: "netspeak-cps", + 22000: "snapenetio", + 22001: "optocontrol", + 22002: "optohost002", + 22003: "optohost003", + 22004: "optohost004", + 22005: "optohost004", + 22125: "dcap", + 22128: "gsidcap", + 22222: "easyengine", + 22273: "wnn6", + 22305: "cis", + 22335: "shrewd-control", + 22343: "cis-secure", + 22347: "wibukey", + 22350: "codemeter", + 22351: "codemeter-cmwan", + 22537: "caldsoft-backup", + 22555: "vocaltec-wconf", + 22763: "talikaserver", + 22800: "aws-brf", + 22951: "brf-gw", + 23000: "inovaport1", + 23001: "inovaport2", + 23002: "inovaport3", + 23003: "inovaport4", + 23004: "inovaport5", + 23005: "inovaport6", + 23053: "gntp", + 23294: "5afe-dir", + 23333: "elxmgmt", + 23400: "novar-dbase", + 23401: "novar-alarm", + 23402: "novar-global", + 23456: "aequus", + 23457: "aequus-alt", + 23546: "areaguard-neo", + 24000: "med-ltp", + 24001: "med-fsp-rx", + 24002: "med-fsp-tx", + 24003: "med-supp", + 24004: "med-ovw", + 24005: "med-ci", + 24006: "med-net-svc", + 24242: "filesphere", + 24249: "vista-4gl", + 24321: "ild", + 24386: "intel-rci", + 24465: "tonidods", + 24554: "binkp", + 24577: "bilobit", + 24666: "sdtvwcam", + 24676: "canditv", + 24677: "flashfiler", + 24678: "proactivate", + 24680: "tcc-http", + 24754: "cslg", + 24922: "find", + 25000: "icl-twobase1", + 25001: "icl-twobase2", + 25002: "icl-twobase3", + 25003: "icl-twobase4", + 25004: "icl-twobase5", + 25005: "icl-twobase6", + 25006: "icl-twobase7", + 25007: "icl-twobase8", + 25008: "icl-twobase9", + 25009: "icl-twobase10", + 25576: "sauterdongle", + 25604: "idtp", + 25793: "vocaltec-hos", + 25900: "tasp-net", + 25901: "niobserver", + 25902: "nilinkanalyst", + 25903: "niprobe", + 26000: "quake", + 26133: "scscp", + 26208: "wnn6-ds", + 26257: "cockroach", + 26260: "ezproxy", + 26261: "ezmeeting", + 26262: "k3software-svr", + 26263: "k3software-cli", + 26486: "exoline-tcp", + 26487: "exoconfig", + 26489: "exonet", + 27345: "imagepump", + 27442: "jesmsjc", + 27504: "kopek-httphead", + 27782: "ars-vista", + 27876: "astrolink", + 27999: "tw-auth-key", + 28000: "nxlmd", + 28001: "pqsp", + 28200: "voxelstorm", + 28240: "siemensgsm", + 28589: "bosswave", + 29167: "otmp", + 29999: "bingbang", + 30000: "ndmps", + 30001: "pago-services1", + 30002: "pago-services2", + 30003: "amicon-fpsu-ra", + 30100: "rwp", + 30260: "kingdomsonline", + 30400: "gs-realtime", + 30999: "ovobs", + 31016: "ka-sddp", + 31020: "autotrac-acp", + 31400: "pace-licensed", + 31416: "xqosd", + 31457: "tetrinet", + 31620: "lm-mon", + 31685: "dsx-monitor", + 31765: "gamesmith-port", + 31948: "iceedcp-tx", + 31949: "iceedcp-rx", + 32034: "iracinghelper", + 32249: "t1distproc60", + 32400: "plex", + 32483: "apm-link", + 32635: "sec-ntb-clnt", + 32636: "DMExpress", + 32767: "filenet-powsrm", 
+ 32768: "filenet-tms", + 32769: "filenet-rpc", + 32770: "filenet-nch", + 32771: "filenet-rmi", + 32772: "filenet-pa", + 32773: "filenet-cm", + 32774: "filenet-re", + 32775: "filenet-pch", + 32776: "filenet-peior", + 32777: "filenet-obrok", + 32801: "mlsn", + 32811: "retp", + 32896: "idmgratm", + 33060: "mysqlx", + 33123: "aurora-balaena", + 33331: "diamondport", + 33333: "dgi-serv", + 33334: "speedtrace", + 33434: "traceroute", + 33656: "snip-slave", + 34249: "turbonote-2", + 34378: "p-net-local", + 34379: "p-net-remote", + 34567: "dhanalakshmi", + 34962: "profinet-rt", + 34963: "profinet-rtm", + 34964: "profinet-cm", + 34980: "ethercat", + 35000: "heathview", + 35001: "rt-viewer", + 35002: "rt-sound", + 35003: "rt-devicemapper", + 35004: "rt-classmanager", + 35005: "rt-labtracker", + 35006: "rt-helper", + 35100: "axio-disc", + 35354: "kitim", + 35355: "altova-lm", + 35356: "guttersnex", + 35357: "openstack-id", + 36001: "allpeers", + 36524: "febooti-aw", + 36602: "observium-agent", + 36700: "mapx", + 36865: "kastenxpipe", + 37475: "neckar", + 37483: "gdrive-sync", + 37601: "eftp", + 37654: "unisys-eportal", + 38000: "ivs-database", + 38001: "ivs-insertion", + 38002: "cresco-control", + 38201: "galaxy7-data", + 38202: "fairview", + 38203: "agpolicy", + 38800: "sruth", + 38865: "secrmmsafecopya", + 39681: "turbonote-1", + 40000: "safetynetp", + 40404: "sptx", + 40841: "cscp", + 40842: "csccredir", + 40843: "csccfirewall", + 41111: "fs-qos", + 41121: "tentacle", + 41230: "z-wave-s", + 41794: "crestron-cip", + 41795: "crestron-ctp", + 41796: "crestron-cips", + 41797: "crestron-ctps", + 42508: "candp", + 42509: "candrp", + 42510: "caerpc", + 43000: "recvr-rc", + 43188: "reachout", + 43189: "ndm-agent-port", + 43190: "ip-provision", + 43191: "noit-transport", + 43210: "shaperai", + 43439: "eq3-update", + 43440: "ew-mgmt", + 43441: "ciscocsdb", + 44123: "z-wave-tunnel", + 44321: "pmcd", + 44322: "pmcdproxy", + 44323: "pmwebapi", + 44444: "cognex-dataman", + 44553: "rbr-debug", + 44818: "EtherNet-IP-2", + 44900: "m3da", + 45000: "asmp", + 45001: "asmps", + 45002: "rs-status", + 45045: "synctest", + 45054: "invision-ag", + 45514: "cloudcheck", + 45678: "eba", + 45824: "dai-shell", + 45825: "qdb2service", + 45966: "ssr-servermgr", + 46336: "inedo", + 46998: "spremotetablet", + 46999: "mediabox", + 47000: "mbus", + 47001: "winrm", + 47557: "dbbrowse", + 47624: "directplaysrvr", + 47806: "ap", + 47808: "bacnet", + 48000: "nimcontroller", + 48001: "nimspooler", + 48002: "nimhub", + 48003: "nimgtw", + 48004: "nimbusdb", + 48005: "nimbusdbctrl", + 48049: "3gpp-cbsp", + 48050: "weandsf", + 48128: "isnetserv", + 48129: "blp5", + 48556: "com-bardac-dw", + 48619: "iqobject", + 48653: "robotraconteur", + 49000: "matahari", + 49001: "nusrp", +} +var udpPortNames = map[UDPPort]string{ + 1: "tcpmux", + 2: "compressnet", + 3: "compressnet", + 5: "rje", + 7: "echo", + 9: "discard", + 11: "systat", + 13: "daytime", + 17: "qotd", + 18: "msp", + 19: "chargen", + 20: "ftp-data", + 21: "ftp", + 22: "ssh", + 23: "telnet", + 25: "smtp", + 27: "nsw-fe", + 29: "msg-icp", + 31: "msg-auth", + 33: "dsp", + 37: "time", + 38: "rap", + 39: "rlp", + 41: "graphics", + 42: "name", + 43: "nicname", + 44: "mpm-flags", + 45: "mpm", + 46: "mpm-snd", + 48: "auditd", + 49: "tacacs", + 50: "re-mail-ck", + 52: "xns-time", + 53: "domain", + 54: "xns-ch", + 55: "isi-gl", + 56: "xns-auth", + 58: "xns-mail", + 62: "acas", + 63: "whoispp", + 64: "covia", + 65: "tacacs-ds", + 66: "sql-net", + 67: "bootps", + 68: "bootpc", + 69: "tftp", + 70: 
"gopher", + 71: "netrjs-1", + 72: "netrjs-2", + 73: "netrjs-3", + 74: "netrjs-4", + 76: "deos", + 78: "vettcp", + 79: "finger", + 80: "http", + 82: "xfer", + 83: "mit-ml-dev", + 84: "ctf", + 85: "mit-ml-dev", + 86: "mfcobol", + 88: "kerberos", + 89: "su-mit-tg", + 90: "dnsix", + 91: "mit-dov", + 92: "npp", + 93: "dcp", + 94: "objcall", + 95: "supdup", + 96: "dixie", + 97: "swift-rvf", + 98: "tacnews", + 99: "metagram", + 101: "hostname", + 102: "iso-tsap", + 103: "gppitnp", + 104: "acr-nema", + 105: "cso", + 106: "3com-tsmux", + 107: "rtelnet", + 108: "snagas", + 109: "pop2", + 110: "pop3", + 111: "sunrpc", + 112: "mcidas", + 113: "auth", + 115: "sftp", + 116: "ansanotify", + 117: "uucp-path", + 118: "sqlserv", + 119: "nntp", + 120: "cfdptkt", + 121: "erpc", + 122: "smakynet", + 123: "ntp", + 124: "ansatrader", + 125: "locus-map", + 126: "nxedit", + 127: "locus-con", + 128: "gss-xlicen", + 129: "pwdgen", + 130: "cisco-fna", + 131: "cisco-tna", + 132: "cisco-sys", + 133: "statsrv", + 134: "ingres-net", + 135: "epmap", + 136: "profile", + 137: "netbios-ns", + 138: "netbios-dgm", + 139: "netbios-ssn", + 140: "emfis-data", + 141: "emfis-cntl", + 142: "bl-idm", + 143: "imap", + 144: "uma", + 145: "uaac", + 146: "iso-tp0", + 147: "iso-ip", + 148: "jargon", + 149: "aed-512", + 150: "sql-net", + 151: "hems", + 152: "bftp", + 153: "sgmp", + 154: "netsc-prod", + 155: "netsc-dev", + 156: "sqlsrv", + 157: "knet-cmp", + 158: "pcmail-srv", + 159: "nss-routing", + 160: "sgmp-traps", + 161: "snmp", + 162: "snmptrap", + 163: "cmip-man", + 164: "cmip-agent", + 165: "xns-courier", + 166: "s-net", + 167: "namp", + 168: "rsvd", + 169: "send", + 170: "print-srv", + 171: "multiplex", + 172: "cl-1", + 173: "xyplex-mux", + 174: "mailq", + 175: "vmnet", + 176: "genrad-mux", + 177: "xdmcp", + 178: "nextstep", + 179: "bgp", + 180: "ris", + 181: "unify", + 182: "audit", + 183: "ocbinder", + 184: "ocserver", + 185: "remote-kis", + 186: "kis", + 187: "aci", + 188: "mumps", + 189: "qft", + 190: "gacp", + 191: "prospero", + 192: "osu-nms", + 193: "srmp", + 194: "irc", + 195: "dn6-nlm-aud", + 196: "dn6-smm-red", + 197: "dls", + 198: "dls-mon", + 199: "smux", + 200: "src", + 201: "at-rtmp", + 202: "at-nbp", + 203: "at-3", + 204: "at-echo", + 205: "at-5", + 206: "at-zis", + 207: "at-7", + 208: "at-8", + 209: "qmtp", + 210: "z39-50", + 211: "914c-g", + 212: "anet", + 213: "ipx", + 214: "vmpwscs", + 215: "softpc", + 216: "CAIlic", + 217: "dbase", + 218: "mpp", + 219: "uarps", + 220: "imap3", + 221: "fln-spx", + 222: "rsh-spx", + 223: "cdc", + 224: "masqdialer", + 242: "direct", + 243: "sur-meas", + 244: "inbusiness", + 245: "link", + 246: "dsp3270", + 247: "subntbcst-tftp", + 248: "bhfhs", + 256: "rap", + 257: "set", + 259: "esro-gen", + 260: "openport", + 261: "nsiiops", + 262: "arcisdms", + 263: "hdap", + 264: "bgmp", + 265: "x-bone-ctl", + 266: "sst", + 267: "td-service", + 268: "td-replica", + 269: "manet", + 270: "gist", + 280: "http-mgmt", + 281: "personal-link", + 282: "cableport-ax", + 283: "rescap", + 284: "corerjd", + 286: "fxp", + 287: "k-block", + 308: "novastorbakcup", + 309: "entrusttime", + 310: "bhmds", + 311: "asip-webadmin", + 312: "vslmp", + 313: "magenta-logic", + 314: "opalis-robot", + 315: "dpsi", + 316: "decauth", + 317: "zannet", + 318: "pkix-timestamp", + 319: "ptp-event", + 320: "ptp-general", + 321: "pip", + 322: "rtsps", + 333: "texar", + 344: "pdap", + 345: "pawserv", + 346: "zserv", + 347: "fatserv", + 348: "csi-sgwp", + 349: "mftp", + 350: "matip-type-a", + 351: "matip-type-b", + 352: 
"dtag-ste-sb", + 353: "ndsauth", + 354: "bh611", + 355: "datex-asn", + 356: "cloanto-net-1", + 357: "bhevent", + 358: "shrinkwrap", + 359: "nsrmp", + 360: "scoi2odialog", + 361: "semantix", + 362: "srssend", + 363: "rsvp-tunnel", + 364: "aurora-cmgr", + 365: "dtk", + 366: "odmr", + 367: "mortgageware", + 368: "qbikgdp", + 369: "rpc2portmap", + 370: "codaauth2", + 371: "clearcase", + 372: "ulistproc", + 373: "legent-1", + 374: "legent-2", + 375: "hassle", + 376: "nip", + 377: "tnETOS", + 378: "dsETOS", + 379: "is99c", + 380: "is99s", + 381: "hp-collector", + 382: "hp-managed-node", + 383: "hp-alarm-mgr", + 384: "arns", + 385: "ibm-app", + 386: "asa", + 387: "aurp", + 388: "unidata-ldm", + 389: "ldap", + 390: "uis", + 391: "synotics-relay", + 392: "synotics-broker", + 393: "meta5", + 394: "embl-ndt", + 395: "netcp", + 396: "netware-ip", + 397: "mptn", + 398: "kryptolan", + 399: "iso-tsap-c2", + 400: "osb-sd", + 401: "ups", + 402: "genie", + 403: "decap", + 404: "nced", + 405: "ncld", + 406: "imsp", + 407: "timbuktu", + 408: "prm-sm", + 409: "prm-nm", + 410: "decladebug", + 411: "rmt", + 412: "synoptics-trap", + 413: "smsp", + 414: "infoseek", + 415: "bnet", + 416: "silverplatter", + 417: "onmux", + 418: "hyper-g", + 419: "ariel1", + 420: "smpte", + 421: "ariel2", + 422: "ariel3", + 423: "opc-job-start", + 424: "opc-job-track", + 425: "icad-el", + 426: "smartsdp", + 427: "svrloc", + 428: "ocs-cmu", + 429: "ocs-amu", + 430: "utmpsd", + 431: "utmpcd", + 432: "iasd", + 433: "nnsp", + 434: "mobileip-agent", + 435: "mobilip-mn", + 436: "dna-cml", + 437: "comscm", + 438: "dsfgw", + 439: "dasp", + 440: "sgcp", + 441: "decvms-sysmgt", + 442: "cvc-hostd", + 443: "https", + 444: "snpp", + 445: "microsoft-ds", + 446: "ddm-rdb", + 447: "ddm-dfm", + 448: "ddm-ssl", + 449: "as-servermap", + 450: "tserver", + 451: "sfs-smp-net", + 452: "sfs-config", + 453: "creativeserver", + 454: "contentserver", + 455: "creativepartnr", + 456: "macon-udp", + 457: "scohelp", + 458: "appleqtc", + 459: "ampr-rcmd", + 460: "skronk", + 461: "datasurfsrv", + 462: "datasurfsrvsec", + 463: "alpes", + 464: "kpasswd", + 465: "igmpv3lite", + 466: "digital-vrc", + 467: "mylex-mapd", + 468: "photuris", + 469: "rcp", + 470: "scx-proxy", + 471: "mondex", + 472: "ljk-login", + 473: "hybrid-pop", + 474: "tn-tl-w2", + 475: "tcpnethaspsrv", + 476: "tn-tl-fd1", + 477: "ss7ns", + 478: "spsc", + 479: "iafserver", + 480: "iafdbase", + 481: "ph", + 482: "bgs-nsi", + 483: "ulpnet", + 484: "integra-sme", + 485: "powerburst", + 486: "avian", + 487: "saft", + 488: "gss-http", + 489: "nest-protocol", + 490: "micom-pfs", + 491: "go-login", + 492: "ticf-1", + 493: "ticf-2", + 494: "pov-ray", + 495: "intecourier", + 496: "pim-rp-disc", + 497: "retrospect", + 498: "siam", + 499: "iso-ill", + 500: "isakmp", + 501: "stmf", + 502: "mbap", + 503: "intrinsa", + 504: "citadel", + 505: "mailbox-lm", + 506: "ohimsrv", + 507: "crs", + 508: "xvttp", + 509: "snare", + 510: "fcp", + 511: "passgo", + 512: "comsat", + 513: "who", + 514: "syslog", + 515: "printer", + 516: "videotex", + 517: "talk", + 518: "ntalk", + 519: "utime", + 520: "router", + 521: "ripng", + 522: "ulp", + 523: "ibm-db2", + 524: "ncp", + 525: "timed", + 526: "tempo", + 527: "stx", + 528: "custix", + 529: "irc-serv", + 530: "courier", + 531: "conference", + 532: "netnews", + 533: "netwall", + 534: "windream", + 535: "iiop", + 536: "opalis-rdv", + 537: "nmsp", + 538: "gdomap", + 539: "apertus-ldp", + 540: "uucp", + 541: "uucp-rlogin", + 542: "commerce", + 543: "klogin", + 544: "kshell", + 545: 
"appleqtcsrvr", + 546: "dhcpv6-client", + 547: "dhcpv6-server", + 548: "afpovertcp", + 549: "idfp", + 550: "new-rwho", + 551: "cybercash", + 552: "devshr-nts", + 553: "pirp", + 554: "rtsp", + 555: "dsf", + 556: "remotefs", + 557: "openvms-sysipc", + 558: "sdnskmp", + 559: "teedtap", + 560: "rmonitor", + 561: "monitor", + 562: "chshell", + 563: "nntps", + 564: "9pfs", + 565: "whoami", + 566: "streettalk", + 567: "banyan-rpc", + 568: "ms-shuttle", + 569: "ms-rome", + 570: "meter", + 571: "meter", + 572: "sonar", + 573: "banyan-vip", + 574: "ftp-agent", + 575: "vemmi", + 576: "ipcd", + 577: "vnas", + 578: "ipdd", + 579: "decbsrv", + 580: "sntp-heartbeat", + 581: "bdp", + 582: "scc-security", + 583: "philips-vc", + 584: "keyserver", + 586: "password-chg", + 587: "submission", + 588: "cal", + 589: "eyelink", + 590: "tns-cml", + 591: "http-alt", + 592: "eudora-set", + 593: "http-rpc-epmap", + 594: "tpip", + 595: "cab-protocol", + 596: "smsd", + 597: "ptcnameservice", + 598: "sco-websrvrmg3", + 599: "acp", + 600: "ipcserver", + 601: "syslog-conn", + 602: "xmlrpc-beep", + 603: "idxp", + 604: "tunnel", + 605: "soap-beep", + 606: "urm", + 607: "nqs", + 608: "sift-uft", + 609: "npmp-trap", + 610: "npmp-local", + 611: "npmp-gui", + 612: "hmmp-ind", + 613: "hmmp-op", + 614: "sshell", + 615: "sco-inetmgr", + 616: "sco-sysmgr", + 617: "sco-dtmgr", + 618: "dei-icda", + 619: "compaq-evm", + 620: "sco-websrvrmgr", + 621: "escp-ip", + 622: "collaborator", + 623: "asf-rmcp", + 624: "cryptoadmin", + 625: "dec-dlm", + 626: "asia", + 627: "passgo-tivoli", + 628: "qmqp", + 629: "3com-amp3", + 630: "rda", + 631: "ipp", + 632: "bmpp", + 633: "servstat", + 634: "ginad", + 635: "rlzdbase", + 636: "ldaps", + 637: "lanserver", + 638: "mcns-sec", + 639: "msdp", + 640: "entrust-sps", + 641: "repcmd", + 642: "esro-emsdp", + 643: "sanity", + 644: "dwr", + 645: "pssc", + 646: "ldp", + 647: "dhcp-failover", + 648: "rrp", + 649: "cadview-3d", + 650: "obex", + 651: "ieee-mms", + 652: "hello-port", + 653: "repscmd", + 654: "aodv", + 655: "tinc", + 656: "spmp", + 657: "rmc", + 658: "tenfold", + 660: "mac-srvr-admin", + 661: "hap", + 662: "pftp", + 663: "purenoise", + 664: "asf-secure-rmcp", + 665: "sun-dr", + 666: "mdqs", + 667: "disclose", + 668: "mecomm", + 669: "meregister", + 670: "vacdsm-sws", + 671: "vacdsm-app", + 672: "vpps-qua", + 673: "cimplex", + 674: "acap", + 675: "dctp", + 676: "vpps-via", + 677: "vpp", + 678: "ggf-ncp", + 679: "mrm", + 680: "entrust-aaas", + 681: "entrust-aams", + 682: "xfr", + 683: "corba-iiop", + 684: "corba-iiop-ssl", + 685: "mdc-portmapper", + 686: "hcp-wismar", + 687: "asipregistry", + 688: "realm-rusd", + 689: "nmap", + 690: "vatp", + 691: "msexch-routing", + 692: "hyperwave-isp", + 693: "connendp", + 694: "ha-cluster", + 695: "ieee-mms-ssl", + 696: "rushd", + 697: "uuidgen", + 698: "olsr", + 699: "accessnetwork", + 700: "epp", + 701: "lmp", + 702: "iris-beep", + 704: "elcsd", + 705: "agentx", + 706: "silc", + 707: "borland-dsj", + 709: "entrust-kmsh", + 710: "entrust-ash", + 711: "cisco-tdp", + 712: "tbrpf", + 713: "iris-xpc", + 714: "iris-xpcs", + 715: "iris-lwz", + 716: "pana", + 729: "netviewdm1", + 730: "netviewdm2", + 731: "netviewdm3", + 741: "netgw", + 742: "netrcs", + 744: "flexlm", + 747: "fujitsu-dev", + 748: "ris-cm", + 749: "kerberos-adm", + 750: "loadav", + 751: "pump", + 752: "qrh", + 753: "rrh", + 754: "tell", + 758: "nlogin", + 759: "con", + 760: "ns", + 761: "rxe", + 762: "quotad", + 763: "cycleserv", + 764: "omserv", + 765: "webster", + 767: "phonebook", + 769: "vid", + 
770: "cadlock", + 771: "rtip", + 772: "cycleserv2", + 773: "notify", + 774: "acmaint-dbd", + 775: "acmaint-transd", + 776: "wpages", + 777: "multiling-http", + 780: "wpgs", + 800: "mdbs-daemon", + 801: "device", + 802: "mbap-s", + 810: "fcp-udp", + 828: "itm-mcell-s", + 829: "pkix-3-ca-ra", + 830: "netconf-ssh", + 831: "netconf-beep", + 832: "netconfsoaphttp", + 833: "netconfsoapbeep", + 847: "dhcp-failover2", + 848: "gdoi", + 853: "domain-s", + 854: "dlep", + 860: "iscsi", + 861: "owamp-control", + 862: "twamp-control", + 873: "rsync", + 886: "iclcnet-locate", + 887: "iclcnet-svinfo", + 888: "accessbuilder", + 900: "omginitialrefs", + 901: "smpnameres", + 902: "ideafarm-door", + 903: "ideafarm-panic", + 910: "kink", + 911: "xact-backup", + 912: "apex-mesh", + 913: "apex-edge", + 989: "ftps-data", + 990: "ftps", + 991: "nas", + 992: "telnets", + 993: "imaps", + 995: "pop3s", + 996: "vsinet", + 997: "maitrd", + 998: "puparp", + 999: "applix", + 1000: "cadlock2", + 1010: "surf", + 1021: "exp1", + 1022: "exp2", + 1025: "blackjack", + 1026: "cap", + 1027: "6a44", + 1029: "solid-mux", + 1033: "netinfo-local", + 1034: "activesync", + 1035: "mxxrlogin", + 1036: "nsstp", + 1037: "ams", + 1038: "mtqp", + 1039: "sbl", + 1040: "netarx", + 1041: "danf-ak2", + 1042: "afrog", + 1043: "boinc-client", + 1044: "dcutility", + 1045: "fpitp", + 1046: "wfremotertm", + 1047: "neod1", + 1048: "neod2", + 1049: "td-postman", + 1050: "cma", + 1051: "optima-vnet", + 1052: "ddt", + 1053: "remote-as", + 1054: "brvread", + 1055: "ansyslmd", + 1056: "vfo", + 1057: "startron", + 1058: "nim", + 1059: "nimreg", + 1060: "polestar", + 1061: "kiosk", + 1062: "veracity", + 1063: "kyoceranetdev", + 1064: "jstel", + 1065: "syscomlan", + 1066: "fpo-fns", + 1067: "instl-boots", + 1068: "instl-bootc", + 1069: "cognex-insight", + 1070: "gmrupdateserv", + 1071: "bsquare-voip", + 1072: "cardax", + 1073: "bridgecontrol", + 1074: "warmspotMgmt", + 1075: "rdrmshc", + 1076: "dab-sti-c", + 1077: "imgames", + 1078: "avocent-proxy", + 1079: "asprovatalk", + 1080: "socks", + 1081: "pvuniwien", + 1082: "amt-esd-prot", + 1083: "ansoft-lm-1", + 1084: "ansoft-lm-2", + 1085: "webobjects", + 1086: "cplscrambler-lg", + 1087: "cplscrambler-in", + 1088: "cplscrambler-al", + 1089: "ff-annunc", + 1090: "ff-fms", + 1091: "ff-sm", + 1092: "obrpd", + 1093: "proofd", + 1094: "rootd", + 1095: "nicelink", + 1096: "cnrprotocol", + 1097: "sunclustermgr", + 1098: "rmiactivation", + 1099: "rmiregistry", + 1100: "mctp", + 1101: "pt2-discover", + 1102: "adobeserver-1", + 1103: "adobeserver-2", + 1104: "xrl", + 1105: "ftranhc", + 1106: "isoipsigport-1", + 1107: "isoipsigport-2", + 1108: "ratio-adp", + 1110: "nfsd-keepalive", + 1111: "lmsocialserver", + 1112: "icp", + 1113: "ltp-deepspace", + 1114: "mini-sql", + 1115: "ardus-trns", + 1116: "ardus-cntl", + 1117: "ardus-mtrns", + 1118: "sacred", + 1119: "bnetgame", + 1120: "bnetfile", + 1121: "rmpp", + 1122: "availant-mgr", + 1123: "murray", + 1124: "hpvmmcontrol", + 1125: "hpvmmagent", + 1126: "hpvmmdata", + 1127: "kwdb-commn", + 1128: "saphostctrl", + 1129: "saphostctrls", + 1130: "casp", + 1131: "caspssl", + 1132: "kvm-via-ip", + 1133: "dfn", + 1134: "aplx", + 1135: "omnivision", + 1136: "hhb-gateway", + 1137: "trim", + 1138: "encrypted-admin", + 1139: "evm", + 1140: "autonoc", + 1141: "mxomss", + 1142: "edtools", + 1143: "imyx", + 1144: "fuscript", + 1145: "x9-icue", + 1146: "audit-transfer", + 1147: "capioverlan", + 1148: "elfiq-repl", + 1149: "bvtsonar", + 1150: "blaze", + 1151: "unizensus", + 1152: 
"winpoplanmess", + 1153: "c1222-acse", + 1154: "resacommunity", + 1155: "nfa", + 1156: "iascontrol-oms", + 1157: "iascontrol", + 1158: "dbcontrol-oms", + 1159: "oracle-oms", + 1160: "olsv", + 1161: "health-polling", + 1162: "health-trap", + 1163: "sddp", + 1164: "qsm-proxy", + 1165: "qsm-gui", + 1166: "qsm-remote", + 1167: "cisco-ipsla", + 1168: "vchat", + 1169: "tripwire", + 1170: "atc-lm", + 1171: "atc-appserver", + 1172: "dnap", + 1173: "d-cinema-rrp", + 1174: "fnet-remote-ui", + 1175: "dossier", + 1176: "indigo-server", + 1177: "dkmessenger", + 1178: "sgi-storman", + 1179: "b2n", + 1180: "mc-client", + 1181: "3comnetman", + 1182: "accelenet-data", + 1183: "llsurfup-http", + 1184: "llsurfup-https", + 1185: "catchpole", + 1186: "mysql-cluster", + 1187: "alias", + 1188: "hp-webadmin", + 1189: "unet", + 1190: "commlinx-avl", + 1191: "gpfs", + 1192: "caids-sensor", + 1193: "fiveacross", + 1194: "openvpn", + 1195: "rsf-1", + 1196: "netmagic", + 1197: "carrius-rshell", + 1198: "cajo-discovery", + 1199: "dmidi", + 1200: "scol", + 1201: "nucleus-sand", + 1202: "caiccipc", + 1203: "ssslic-mgr", + 1204: "ssslog-mgr", + 1205: "accord-mgc", + 1206: "anthony-data", + 1207: "metasage", + 1208: "seagull-ais", + 1209: "ipcd3", + 1210: "eoss", + 1211: "groove-dpp", + 1212: "lupa", + 1213: "mpc-lifenet", + 1214: "kazaa", + 1215: "scanstat-1", + 1216: "etebac5", + 1217: "hpss-ndapi", + 1218: "aeroflight-ads", + 1219: "aeroflight-ret", + 1220: "qt-serveradmin", + 1221: "sweetware-apps", + 1222: "nerv", + 1223: "tgp", + 1224: "vpnz", + 1225: "slinkysearch", + 1226: "stgxfws", + 1227: "dns2go", + 1228: "florence", + 1229: "zented", + 1230: "periscope", + 1231: "menandmice-lpm", + 1232: "first-defense", + 1233: "univ-appserver", + 1234: "search-agent", + 1235: "mosaicsyssvc1", + 1236: "bvcontrol", + 1237: "tsdos390", + 1238: "hacl-qs", + 1239: "nmsd", + 1240: "instantia", + 1241: "nessus", + 1242: "nmasoverip", + 1243: "serialgateway", + 1244: "isbconference1", + 1245: "isbconference2", + 1246: "payrouter", + 1247: "visionpyramid", + 1248: "hermes", + 1249: "mesavistaco", + 1250: "swldy-sias", + 1251: "servergraph", + 1252: "bspne-pcc", + 1253: "q55-pcc", + 1254: "de-noc", + 1255: "de-cache-query", + 1256: "de-server", + 1257: "shockwave2", + 1258: "opennl", + 1259: "opennl-voice", + 1260: "ibm-ssd", + 1261: "mpshrsv", + 1262: "qnts-orb", + 1263: "dka", + 1264: "prat", + 1265: "dssiapi", + 1266: "dellpwrappks", + 1267: "epc", + 1268: "propel-msgsys", + 1269: "watilapp", + 1270: "opsmgr", + 1271: "excw", + 1272: "cspmlockmgr", + 1273: "emc-gateway", + 1274: "t1distproc", + 1275: "ivcollector", + 1277: "miva-mqs", + 1278: "dellwebadmin-1", + 1279: "dellwebadmin-2", + 1280: "pictrography", + 1281: "healthd", + 1282: "emperion", + 1283: "productinfo", + 1284: "iee-qfx", + 1285: "neoiface", + 1286: "netuitive", + 1287: "routematch", + 1288: "navbuddy", + 1289: "jwalkserver", + 1290: "winjaserver", + 1291: "seagulllms", + 1292: "dsdn", + 1293: "pkt-krb-ipsec", + 1294: "cmmdriver", + 1295: "ehtp", + 1296: "dproxy", + 1297: "sdproxy", + 1298: "lpcp", + 1299: "hp-sci", + 1300: "h323hostcallsc", + 1301: "ci3-software-1", + 1302: "ci3-software-2", + 1303: "sftsrv", + 1304: "boomerang", + 1305: "pe-mike", + 1306: "re-conn-proto", + 1307: "pacmand", + 1308: "odsi", + 1309: "jtag-server", + 1310: "husky", + 1311: "rxmon", + 1312: "sti-envision", + 1313: "bmc-patroldb", + 1314: "pdps", + 1315: "els", + 1316: "exbit-escp", + 1317: "vrts-ipcserver", + 1318: "krb5gatekeeper", + 1319: "amx-icsp", + 1320: "amx-axbnet", + 1321: 
"pip", + 1322: "novation", + 1323: "brcd", + 1324: "delta-mcp", + 1325: "dx-instrument", + 1326: "wimsic", + 1327: "ultrex", + 1328: "ewall", + 1329: "netdb-export", + 1330: "streetperfect", + 1331: "intersan", + 1332: "pcia-rxp-b", + 1333: "passwrd-policy", + 1334: "writesrv", + 1335: "digital-notary", + 1336: "ischat", + 1337: "menandmice-dns", + 1338: "wmc-log-svc", + 1339: "kjtsiteserver", + 1340: "naap", + 1341: "qubes", + 1342: "esbroker", + 1343: "re101", + 1344: "icap", + 1345: "vpjp", + 1346: "alta-ana-lm", + 1347: "bbn-mmc", + 1348: "bbn-mmx", + 1349: "sbook", + 1350: "editbench", + 1351: "equationbuilder", + 1352: "lotusnote", + 1353: "relief", + 1354: "XSIP-network", + 1355: "intuitive-edge", + 1356: "cuillamartin", + 1357: "pegboard", + 1358: "connlcli", + 1359: "ftsrv", + 1360: "mimer", + 1361: "linx", + 1362: "timeflies", + 1363: "ndm-requester", + 1364: "ndm-server", + 1365: "adapt-sna", + 1366: "netware-csp", + 1367: "dcs", + 1368: "screencast", + 1369: "gv-us", + 1370: "us-gv", + 1371: "fc-cli", + 1372: "fc-ser", + 1373: "chromagrafx", + 1374: "molly", + 1375: "bytex", + 1376: "ibm-pps", + 1377: "cichlid", + 1378: "elan", + 1379: "dbreporter", + 1380: "telesis-licman", + 1381: "apple-licman", + 1382: "udt-os", + 1383: "gwha", + 1384: "os-licman", + 1385: "atex-elmd", + 1386: "checksum", + 1387: "cadsi-lm", + 1388: "objective-dbc", + 1389: "iclpv-dm", + 1390: "iclpv-sc", + 1391: "iclpv-sas", + 1392: "iclpv-pm", + 1393: "iclpv-nls", + 1394: "iclpv-nlc", + 1395: "iclpv-wsm", + 1396: "dvl-activemail", + 1397: "audio-activmail", + 1398: "video-activmail", + 1399: "cadkey-licman", + 1400: "cadkey-tablet", + 1401: "goldleaf-licman", + 1402: "prm-sm-np", + 1403: "prm-nm-np", + 1404: "igi-lm", + 1405: "ibm-res", + 1406: "netlabs-lm", + 1408: "sophia-lm", + 1409: "here-lm", + 1410: "hiq", + 1411: "af", + 1412: "innosys", + 1413: "innosys-acl", + 1414: "ibm-mqseries", + 1415: "dbstar", + 1416: "novell-lu6-2", + 1417: "timbuktu-srv1", + 1418: "timbuktu-srv2", + 1419: "timbuktu-srv3", + 1420: "timbuktu-srv4", + 1421: "gandalf-lm", + 1422: "autodesk-lm", + 1423: "essbase", + 1424: "hybrid", + 1425: "zion-lm", + 1426: "sais", + 1427: "mloadd", + 1428: "informatik-lm", + 1429: "nms", + 1430: "tpdu", + 1431: "rgtp", + 1432: "blueberry-lm", + 1433: "ms-sql-s", + 1434: "ms-sql-m", + 1435: "ibm-cics", + 1436: "saism", + 1437: "tabula", + 1438: "eicon-server", + 1439: "eicon-x25", + 1440: "eicon-slp", + 1441: "cadis-1", + 1442: "cadis-2", + 1443: "ies-lm", + 1444: "marcam-lm", + 1445: "proxima-lm", + 1446: "ora-lm", + 1447: "apri-lm", + 1448: "oc-lm", + 1449: "peport", + 1450: "dwf", + 1451: "infoman", + 1452: "gtegsc-lm", + 1453: "genie-lm", + 1454: "interhdl-elmd", + 1455: "esl-lm", + 1456: "dca", + 1457: "valisys-lm", + 1458: "nrcabq-lm", + 1459: "proshare1", + 1460: "proshare2", + 1461: "ibm-wrless-lan", + 1462: "world-lm", + 1463: "nucleus", + 1464: "msl-lmd", + 1465: "pipes", + 1466: "oceansoft-lm", + 1467: "csdmbase", + 1468: "csdm", + 1469: "aal-lm", + 1470: "uaiact", + 1471: "csdmbase", + 1472: "csdm", + 1473: "openmath", + 1474: "telefinder", + 1475: "taligent-lm", + 1476: "clvm-cfg", + 1477: "ms-sna-server", + 1478: "ms-sna-base", + 1479: "dberegister", + 1480: "pacerforum", + 1481: "airs", + 1482: "miteksys-lm", + 1483: "afs", + 1484: "confluent", + 1485: "lansource", + 1486: "nms-topo-serv", + 1487: "localinfosrvr", + 1488: "docstor", + 1489: "dmdocbroker", + 1490: "insitu-conf", + 1492: "stone-design-1", + 1493: "netmap-lm", + 1494: "ica", + 1495: "cvc", + 1496: "liberty-lm", + 
1497: "rfx-lm", + 1498: "sybase-sqlany", + 1499: "fhc", + 1500: "vlsi-lm", + 1501: "saiscm", + 1502: "shivadiscovery", + 1503: "imtc-mcs", + 1504: "evb-elm", + 1505: "funkproxy", + 1506: "utcd", + 1507: "symplex", + 1508: "diagmond", + 1509: "robcad-lm", + 1510: "mvx-lm", + 1511: "3l-l1", + 1512: "wins", + 1513: "fujitsu-dtc", + 1514: "fujitsu-dtcns", + 1515: "ifor-protocol", + 1516: "vpad", + 1517: "vpac", + 1518: "vpvd", + 1519: "vpvc", + 1520: "atm-zip-office", + 1521: "ncube-lm", + 1522: "ricardo-lm", + 1523: "cichild-lm", + 1524: "ingreslock", + 1525: "orasrv", + 1526: "pdap-np", + 1527: "tlisrv", + 1528: "ngr-t", + 1529: "coauthor", + 1530: "rap-service", + 1531: "rap-listen", + 1532: "miroconnect", + 1533: "virtual-places", + 1534: "micromuse-lm", + 1535: "ampr-info", + 1536: "ampr-inter", + 1537: "sdsc-lm", + 1538: "3ds-lm", + 1539: "intellistor-lm", + 1540: "rds", + 1541: "rds2", + 1542: "gridgen-elmd", + 1543: "simba-cs", + 1544: "aspeclmd", + 1545: "vistium-share", + 1546: "abbaccuray", + 1547: "laplink", + 1548: "axon-lm", + 1549: "shivasound", + 1550: "3m-image-lm", + 1551: "hecmtl-db", + 1552: "pciarray", + 1553: "sna-cs", + 1554: "caci-lm", + 1555: "livelan", + 1556: "veritas-pbx", + 1557: "arbortext-lm", + 1558: "xingmpeg", + 1559: "web2host", + 1560: "asci-val", + 1561: "facilityview", + 1562: "pconnectmgr", + 1563: "cadabra-lm", + 1564: "pay-per-view", + 1565: "winddlb", + 1566: "corelvideo", + 1567: "jlicelmd", + 1568: "tsspmap", + 1569: "ets", + 1570: "orbixd", + 1571: "rdb-dbs-disp", + 1572: "chip-lm", + 1573: "itscomm-ns", + 1574: "mvel-lm", + 1575: "oraclenames", + 1576: "moldflow-lm", + 1577: "hypercube-lm", + 1578: "jacobus-lm", + 1579: "ioc-sea-lm", + 1580: "tn-tl-r2", + 1581: "mil-2045-47001", + 1582: "msims", + 1583: "simbaexpress", + 1584: "tn-tl-fd2", + 1585: "intv", + 1586: "ibm-abtact", + 1587: "pra-elmd", + 1588: "triquest-lm", + 1589: "vqp", + 1590: "gemini-lm", + 1591: "ncpm-pm", + 1592: "commonspace", + 1593: "mainsoft-lm", + 1594: "sixtrak", + 1595: "radio", + 1596: "radio-bc", + 1597: "orbplus-iiop", + 1598: "picknfs", + 1599: "simbaservices", + 1600: "issd", + 1601: "aas", + 1602: "inspect", + 1603: "picodbc", + 1604: "icabrowser", + 1605: "slp", + 1606: "slm-api", + 1607: "stt", + 1608: "smart-lm", + 1609: "isysg-lm", + 1610: "taurus-wh", + 1611: "ill", + 1612: "netbill-trans", + 1613: "netbill-keyrep", + 1614: "netbill-cred", + 1615: "netbill-auth", + 1616: "netbill-prod", + 1617: "nimrod-agent", + 1618: "skytelnet", + 1619: "xs-openstorage", + 1620: "faxportwinport", + 1621: "softdataphone", + 1622: "ontime", + 1623: "jaleosnd", + 1624: "udp-sr-port", + 1625: "svs-omagent", + 1626: "shockwave", + 1627: "t128-gateway", + 1628: "lontalk-norm", + 1629: "lontalk-urgnt", + 1630: "oraclenet8cman", + 1631: "visitview", + 1632: "pammratc", + 1633: "pammrpc", + 1634: "loaprobe", + 1635: "edb-server1", + 1636: "isdc", + 1637: "islc", + 1638: "ismc", + 1639: "cert-initiator", + 1640: "cert-responder", + 1641: "invision", + 1642: "isis-am", + 1643: "isis-ambc", + 1644: "saiseh", + 1645: "sightline", + 1646: "sa-msg-port", + 1647: "rsap", + 1648: "concurrent-lm", + 1649: "kermit", + 1650: "nkd", + 1651: "shiva-confsrvr", + 1652: "xnmp", + 1653: "alphatech-lm", + 1654: "stargatealerts", + 1655: "dec-mbadmin", + 1656: "dec-mbadmin-h", + 1657: "fujitsu-mmpdc", + 1658: "sixnetudr", + 1659: "sg-lm", + 1660: "skip-mc-gikreq", + 1661: "netview-aix-1", + 1662: "netview-aix-2", + 1663: "netview-aix-3", + 1664: "netview-aix-4", + 1665: "netview-aix-5", + 1666: 
"netview-aix-6", + 1667: "netview-aix-7", + 1668: "netview-aix-8", + 1669: "netview-aix-9", + 1670: "netview-aix-10", + 1671: "netview-aix-11", + 1672: "netview-aix-12", + 1673: "proshare-mc-1", + 1674: "proshare-mc-2", + 1675: "pdp", + 1676: "netcomm2", + 1677: "groupwise", + 1678: "prolink", + 1679: "darcorp-lm", + 1680: "microcom-sbp", + 1681: "sd-elmd", + 1682: "lanyon-lantern", + 1683: "ncpm-hip", + 1684: "snaresecure", + 1685: "n2nremote", + 1686: "cvmon", + 1687: "nsjtp-ctrl", + 1688: "nsjtp-data", + 1689: "firefox", + 1690: "ng-umds", + 1691: "empire-empuma", + 1692: "sstsys-lm", + 1693: "rrirtr", + 1694: "rrimwm", + 1695: "rrilwm", + 1696: "rrifmm", + 1697: "rrisat", + 1698: "rsvp-encap-1", + 1699: "rsvp-encap-2", + 1700: "mps-raft", + 1701: "l2f", + 1702: "deskshare", + 1703: "hb-engine", + 1704: "bcs-broker", + 1705: "slingshot", + 1706: "jetform", + 1707: "vdmplay", + 1708: "gat-lmd", + 1709: "centra", + 1710: "impera", + 1711: "pptconference", + 1712: "registrar", + 1713: "conferencetalk", + 1714: "sesi-lm", + 1715: "houdini-lm", + 1716: "xmsg", + 1717: "fj-hdnet", + 1718: "h323gatedisc", + 1719: "h323gatestat", + 1720: "h323hostcall", + 1721: "caicci", + 1722: "hks-lm", + 1723: "pptp", + 1724: "csbphonemaster", + 1725: "iden-ralp", + 1726: "iberiagames", + 1727: "winddx", + 1728: "telindus", + 1729: "citynl", + 1730: "roketz", + 1731: "msiccp", + 1732: "proxim", + 1733: "siipat", + 1734: "cambertx-lm", + 1735: "privatechat", + 1736: "street-stream", + 1737: "ultimad", + 1738: "gamegen1", + 1739: "webaccess", + 1740: "encore", + 1741: "cisco-net-mgmt", + 1742: "3Com-nsd", + 1743: "cinegrfx-lm", + 1744: "ncpm-ft", + 1745: "remote-winsock", + 1746: "ftrapid-1", + 1747: "ftrapid-2", + 1748: "oracle-em1", + 1749: "aspen-services", + 1750: "sslp", + 1751: "swiftnet", + 1752: "lofr-lm", + 1754: "oracle-em2", + 1755: "ms-streaming", + 1756: "capfast-lmd", + 1757: "cnhrp", + 1758: "tftp-mcast", + 1759: "spss-lm", + 1760: "www-ldap-gw", + 1761: "cft-0", + 1762: "cft-1", + 1763: "cft-2", + 1764: "cft-3", + 1765: "cft-4", + 1766: "cft-5", + 1767: "cft-6", + 1768: "cft-7", + 1769: "bmc-net-adm", + 1770: "bmc-net-svc", + 1771: "vaultbase", + 1772: "essweb-gw", + 1773: "kmscontrol", + 1774: "global-dtserv", + 1776: "femis", + 1777: "powerguardian", + 1778: "prodigy-intrnet", + 1779: "pharmasoft", + 1780: "dpkeyserv", + 1781: "answersoft-lm", + 1782: "hp-hcip", + 1784: "finle-lm", + 1785: "windlm", + 1786: "funk-logger", + 1787: "funk-license", + 1788: "psmond", + 1789: "hello", + 1790: "nmsp", + 1791: "ea1", + 1792: "ibm-dt-2", + 1793: "rsc-robot", + 1794: "cera-bcm", + 1795: "dpi-proxy", + 1796: "vocaltec-admin", + 1797: "uma", + 1798: "etp", + 1799: "netrisk", + 1800: "ansys-lm", + 1801: "msmq", + 1802: "concomp1", + 1803: "hp-hcip-gwy", + 1804: "enl", + 1805: "enl-name", + 1806: "musiconline", + 1807: "fhsp", + 1808: "oracle-vp2", + 1809: "oracle-vp1", + 1810: "jerand-lm", + 1811: "scientia-sdb", + 1812: "radius", + 1813: "radius-acct", + 1814: "tdp-suite", + 1815: "mmpft", + 1816: "harp", + 1817: "rkb-oscs", + 1818: "etftp", + 1819: "plato-lm", + 1820: "mcagent", + 1821: "donnyworld", + 1822: "es-elmd", + 1823: "unisys-lm", + 1824: "metrics-pas", + 1825: "direcpc-video", + 1826: "ardt", + 1827: "asi", + 1828: "itm-mcell-u", + 1829: "optika-emedia", + 1830: "net8-cman", + 1831: "myrtle", + 1832: "tht-treasure", + 1833: "udpradio", + 1834: "ardusuni", + 1835: "ardusmul", + 1836: "ste-smsc", + 1837: "csoft1", + 1838: "talnet", + 1839: "netopia-vo1", + 1840: "netopia-vo2", + 1841: 
"netopia-vo3", + 1842: "netopia-vo4", + 1843: "netopia-vo5", + 1844: "direcpc-dll", + 1845: "altalink", + 1846: "tunstall-pnc", + 1847: "slp-notify", + 1848: "fjdocdist", + 1849: "alpha-sms", + 1850: "gsi", + 1851: "ctcd", + 1852: "virtual-time", + 1853: "vids-avtp", + 1854: "buddy-draw", + 1855: "fiorano-rtrsvc", + 1856: "fiorano-msgsvc", + 1857: "datacaptor", + 1858: "privateark", + 1859: "gammafetchsvr", + 1860: "sunscalar-svc", + 1861: "lecroy-vicp", + 1862: "mysql-cm-agent", + 1863: "msnp", + 1864: "paradym-31port", + 1865: "entp", + 1866: "swrmi", + 1867: "udrive", + 1868: "viziblebrowser", + 1869: "transact", + 1870: "sunscalar-dns", + 1871: "canocentral0", + 1872: "canocentral1", + 1873: "fjmpjps", + 1874: "fjswapsnp", + 1875: "westell-stats", + 1876: "ewcappsrv", + 1877: "hp-webqosdb", + 1878: "drmsmc", + 1879: "nettgain-nms", + 1880: "vsat-control", + 1881: "ibm-mqseries2", + 1882: "ecsqdmn", + 1883: "mqtt", + 1884: "idmaps", + 1885: "vrtstrapserver", + 1886: "leoip", + 1887: "filex-lport", + 1888: "ncconfig", + 1889: "unify-adapter", + 1890: "wilkenlistener", + 1891: "childkey-notif", + 1892: "childkey-ctrl", + 1893: "elad", + 1894: "o2server-port", + 1896: "b-novative-ls", + 1897: "metaagent", + 1898: "cymtec-port", + 1899: "mc2studios", + 1900: "ssdp", + 1901: "fjicl-tep-a", + 1902: "fjicl-tep-b", + 1903: "linkname", + 1904: "fjicl-tep-c", + 1905: "sugp", + 1906: "tpmd", + 1907: "intrastar", + 1908: "dawn", + 1909: "global-wlink", + 1910: "ultrabac", + 1911: "mtp", + 1912: "rhp-iibp", + 1913: "armadp", + 1914: "elm-momentum", + 1915: "facelink", + 1916: "persona", + 1917: "noagent", + 1918: "can-nds", + 1919: "can-dch", + 1920: "can-ferret", + 1921: "noadmin", + 1922: "tapestry", + 1923: "spice", + 1924: "xiip", + 1925: "discovery-port", + 1926: "egs", + 1927: "videte-cipc", + 1928: "emsd-port", + 1929: "bandwiz-system", + 1930: "driveappserver", + 1931: "amdsched", + 1932: "ctt-broker", + 1933: "xmapi", + 1934: "xaapi", + 1935: "macromedia-fcs", + 1936: "jetcmeserver", + 1937: "jwserver", + 1938: "jwclient", + 1939: "jvserver", + 1940: "jvclient", + 1941: "dic-aida", + 1942: "res", + 1943: "beeyond-media", + 1944: "close-combat", + 1945: "dialogic-elmd", + 1946: "tekpls", + 1947: "sentinelsrm", + 1948: "eye2eye", + 1949: "ismaeasdaqlive", + 1950: "ismaeasdaqtest", + 1951: "bcs-lmserver", + 1952: "mpnjsc", + 1953: "rapidbase", + 1954: "abr-api", + 1955: "abr-secure", + 1956: "vrtl-vmf-ds", + 1957: "unix-status", + 1958: "dxadmind", + 1959: "simp-all", + 1960: "nasmanager", + 1961: "bts-appserver", + 1962: "biap-mp", + 1963: "webmachine", + 1964: "solid-e-engine", + 1965: "tivoli-npm", + 1966: "slush", + 1967: "sns-quote", + 1968: "lipsinc", + 1969: "lipsinc1", + 1970: "netop-rc", + 1971: "netop-school", + 1972: "intersys-cache", + 1973: "dlsrap", + 1974: "drp", + 1975: "tcoflashagent", + 1976: "tcoregagent", + 1977: "tcoaddressbook", + 1978: "unisql", + 1979: "unisql-java", + 1980: "pearldoc-xact", + 1981: "p2pq", + 1982: "estamp", + 1983: "lhtp", + 1984: "bb", + 1985: "hsrp", + 1986: "licensedaemon", + 1987: "tr-rsrb-p1", + 1988: "tr-rsrb-p2", + 1989: "tr-rsrb-p3", + 1990: "stun-p1", + 1991: "stun-p2", + 1992: "stun-p3", + 1993: "snmp-tcp-port", + 1994: "stun-port", + 1995: "perf-port", + 1996: "tr-rsrb-port", + 1997: "gdp-port", + 1998: "x25-svc-port", + 1999: "tcp-id-port", + 2000: "cisco-sccp", + 2001: "wizard", + 2002: "globe", + 2003: "brutus", + 2004: "emce", + 2005: "oracle", + 2006: "raid-cd", + 2007: "raid-am", + 2008: "terminaldb", + 2009: "whosockami", + 2010: 
"pipe-server", + 2011: "servserv", + 2012: "raid-ac", + 2013: "raid-cd", + 2014: "raid-sf", + 2015: "raid-cs", + 2016: "bootserver", + 2017: "bootclient", + 2018: "rellpack", + 2019: "about", + 2020: "xinupageserver", + 2021: "xinuexpansion1", + 2022: "xinuexpansion2", + 2023: "xinuexpansion3", + 2024: "xinuexpansion4", + 2025: "xribs", + 2026: "scrabble", + 2027: "shadowserver", + 2028: "submitserver", + 2029: "hsrpv6", + 2030: "device2", + 2031: "mobrien-chat", + 2032: "blackboard", + 2033: "glogger", + 2034: "scoremgr", + 2035: "imsldoc", + 2036: "e-dpnet", + 2037: "applus", + 2038: "objectmanager", + 2039: "prizma", + 2040: "lam", + 2041: "interbase", + 2042: "isis", + 2043: "isis-bcast", + 2044: "rimsl", + 2045: "cdfunc", + 2046: "sdfunc", + 2047: "dls", + 2048: "dls-monitor", + 2049: "shilp", + 2050: "av-emb-config", + 2051: "epnsdp", + 2052: "clearvisn", + 2053: "lot105-ds-upd", + 2054: "weblogin", + 2055: "iop", + 2056: "omnisky", + 2057: "rich-cp", + 2058: "newwavesearch", + 2059: "bmc-messaging", + 2060: "teleniumdaemon", + 2061: "netmount", + 2062: "icg-swp", + 2063: "icg-bridge", + 2064: "icg-iprelay", + 2065: "dlsrpn", + 2066: "aura", + 2067: "dlswpn", + 2068: "avauthsrvprtcl", + 2069: "event-port", + 2070: "ah-esp-encap", + 2071: "acp-port", + 2072: "msync", + 2073: "gxs-data-port", + 2074: "vrtl-vmf-sa", + 2075: "newlixengine", + 2076: "newlixconfig", + 2077: "tsrmagt", + 2078: "tpcsrvr", + 2079: "idware-router", + 2080: "autodesk-nlm", + 2081: "kme-trap-port", + 2082: "infowave", + 2083: "radsec", + 2084: "sunclustergeo", + 2085: "ada-cip", + 2086: "gnunet", + 2087: "eli", + 2088: "ip-blf", + 2089: "sep", + 2090: "lrp", + 2091: "prp", + 2092: "descent3", + 2093: "nbx-cc", + 2094: "nbx-au", + 2095: "nbx-ser", + 2096: "nbx-dir", + 2097: "jetformpreview", + 2098: "dialog-port", + 2099: "h2250-annex-g", + 2100: "amiganetfs", + 2101: "rtcm-sc104", + 2102: "zephyr-srv", + 2103: "zephyr-clt", + 2104: "zephyr-hm", + 2105: "minipay", + 2106: "mzap", + 2107: "bintec-admin", + 2108: "comcam", + 2109: "ergolight", + 2110: "umsp", + 2111: "dsatp", + 2112: "idonix-metanet", + 2113: "hsl-storm", + 2114: "newheights", + 2115: "kdm", + 2116: "ccowcmr", + 2117: "mentaclient", + 2118: "mentaserver", + 2119: "gsigatekeeper", + 2120: "qencp", + 2121: "scientia-ssdb", + 2122: "caupc-remote", + 2123: "gtp-control", + 2124: "elatelink", + 2125: "lockstep", + 2126: "pktcable-cops", + 2127: "index-pc-wb", + 2128: "net-steward", + 2129: "cs-live", + 2130: "xds", + 2131: "avantageb2b", + 2132: "solera-epmap", + 2133: "zymed-zpp", + 2134: "avenue", + 2135: "gris", + 2136: "appworxsrv", + 2137: "connect", + 2138: "unbind-cluster", + 2139: "ias-auth", + 2140: "ias-reg", + 2141: "ias-admind", + 2142: "tdmoip", + 2143: "lv-jc", + 2144: "lv-ffx", + 2145: "lv-pici", + 2146: "lv-not", + 2147: "lv-auth", + 2148: "veritas-ucl", + 2149: "acptsys", + 2150: "dynamic3d", + 2151: "docent", + 2152: "gtp-user", + 2153: "ctlptc", + 2154: "stdptc", + 2155: "brdptc", + 2156: "trp", + 2157: "xnds", + 2158: "touchnetplus", + 2159: "gdbremote", + 2160: "apc-2160", + 2161: "apc-2161", + 2162: "navisphere", + 2163: "navisphere-sec", + 2164: "ddns-v3", + 2165: "x-bone-api", + 2166: "iwserver", + 2167: "raw-serial", + 2168: "easy-soft-mux", + 2169: "brain", + 2170: "eyetv", + 2171: "msfw-storage", + 2172: "msfw-s-storage", + 2173: "msfw-replica", + 2174: "msfw-array", + 2175: "airsync", + 2176: "rapi", + 2177: "qwave", + 2178: "bitspeer", + 2179: "vmrdp", + 2180: "mc-gt-srv", + 2181: "eforward", + 2182: "cgn-stat", + 2183: 
"cgn-config", + 2184: "nvd", + 2185: "onbase-dds", + 2186: "gtaua", + 2187: "ssmd", + 2190: "tivoconnect", + 2191: "tvbus", + 2192: "asdis", + 2193: "drwcs", + 2197: "mnp-exchange", + 2198: "onehome-remote", + 2199: "onehome-help", + 2200: "ici", + 2201: "ats", + 2202: "imtc-map", + 2203: "b2-runtime", + 2204: "b2-license", + 2205: "jps", + 2206: "hpocbus", + 2207: "hpssd", + 2208: "hpiod", + 2209: "rimf-ps", + 2210: "noaaport", + 2211: "emwin", + 2212: "leecoposserver", + 2213: "kali", + 2214: "rpi", + 2215: "ipcore", + 2216: "vtu-comms", + 2217: "gotodevice", + 2218: "bounzza", + 2219: "netiq-ncap", + 2220: "netiq", + 2221: "ethernet-ip-s", + 2222: "EtherNet-IP-1", + 2223: "rockwell-csp2", + 2224: "efi-mg", + 2226: "di-drm", + 2227: "di-msg", + 2228: "ehome-ms", + 2229: "datalens", + 2230: "queueadm", + 2231: "wimaxasncp", + 2232: "ivs-video", + 2233: "infocrypt", + 2234: "directplay", + 2235: "sercomm-wlink", + 2236: "nani", + 2237: "optech-port1-lm", + 2238: "aviva-sna", + 2239: "imagequery", + 2240: "recipe", + 2241: "ivsd", + 2242: "foliocorp", + 2243: "magicom", + 2244: "nmsserver", + 2245: "hao", + 2246: "pc-mta-addrmap", + 2247: "antidotemgrsvr", + 2248: "ums", + 2249: "rfmp", + 2250: "remote-collab", + 2251: "dif-port", + 2252: "njenet-ssl", + 2253: "dtv-chan-req", + 2254: "seispoc", + 2255: "vrtp", + 2256: "pcc-mfp", + 2257: "simple-tx-rx", + 2258: "rcts", + 2260: "apc-2260", + 2261: "comotionmaster", + 2262: "comotionback", + 2263: "ecwcfg", + 2264: "apx500api-1", + 2265: "apx500api-2", + 2266: "mfserver", + 2267: "ontobroker", + 2268: "amt", + 2269: "mikey", + 2270: "starschool", + 2271: "mmcals", + 2272: "mmcal", + 2273: "mysql-im", + 2274: "pcttunnell", + 2275: "ibridge-data", + 2276: "ibridge-mgmt", + 2277: "bluectrlproxy", + 2278: "s3db", + 2279: "xmquery", + 2280: "lnvpoller", + 2281: "lnvconsole", + 2282: "lnvalarm", + 2283: "lnvstatus", + 2284: "lnvmaps", + 2285: "lnvmailmon", + 2286: "nas-metering", + 2287: "dna", + 2288: "netml", + 2289: "dict-lookup", + 2290: "sonus-logging", + 2291: "eapsp", + 2292: "mib-streaming", + 2293: "npdbgmngr", + 2294: "konshus-lm", + 2295: "advant-lm", + 2296: "theta-lm", + 2297: "d2k-datamover1", + 2298: "d2k-datamover2", + 2299: "pc-telecommute", + 2300: "cvmmon", + 2301: "cpq-wbem", + 2302: "binderysupport", + 2303: "proxy-gateway", + 2304: "attachmate-uts", + 2305: "mt-scaleserver", + 2306: "tappi-boxnet", + 2307: "pehelp", + 2308: "sdhelp", + 2309: "sdserver", + 2310: "sdclient", + 2311: "messageservice", + 2312: "wanscaler", + 2313: "iapp", + 2314: "cr-websystems", + 2315: "precise-sft", + 2316: "sent-lm", + 2317: "attachmate-g32", + 2318: "cadencecontrol", + 2319: "infolibria", + 2320: "siebel-ns", + 2321: "rdlap", + 2322: "ofsd", + 2323: "3d-nfsd", + 2324: "cosmocall", + 2325: "ansysli", + 2326: "idcp", + 2327: "xingcsm", + 2328: "netrix-sftm", + 2329: "nvd", + 2330: "tscchat", + 2331: "agentview", + 2332: "rcc-host", + 2333: "snapp", + 2334: "ace-client", + 2335: "ace-proxy", + 2336: "appleugcontrol", + 2337: "ideesrv", + 2338: "norton-lambert", + 2339: "3com-webview", + 2340: "wrs-registry", + 2341: "xiostatus", + 2342: "manage-exec", + 2343: "nati-logos", + 2344: "fcmsys", + 2345: "dbm", + 2346: "redstorm-join", + 2347: "redstorm-find", + 2348: "redstorm-info", + 2349: "redstorm-diag", + 2350: "psbserver", + 2351: "psrserver", + 2352: "pslserver", + 2353: "pspserver", + 2354: "psprserver", + 2355: "psdbserver", + 2356: "gxtelmd", + 2357: "unihub-server", + 2358: "futrix", + 2359: "flukeserver", + 2360: "nexstorindltd", + 2361: 
"tl1", + 2362: "digiman", + 2363: "mediacntrlnfsd", + 2364: "oi-2000", + 2365: "dbref", + 2366: "qip-login", + 2367: "service-ctrl", + 2368: "opentable", + 2370: "l3-hbmon", + 2372: "lanmessenger", + 2381: "compaq-https", + 2382: "ms-olap3", + 2383: "ms-olap4", + 2384: "sd-capacity", + 2385: "sd-data", + 2386: "virtualtape", + 2387: "vsamredirector", + 2388: "mynahautostart", + 2389: "ovsessionmgr", + 2390: "rsmtp", + 2391: "3com-net-mgmt", + 2392: "tacticalauth", + 2393: "ms-olap1", + 2394: "ms-olap2", + 2395: "lan900-remote", + 2396: "wusage", + 2397: "ncl", + 2398: "orbiter", + 2399: "fmpro-fdal", + 2400: "opequus-server", + 2401: "cvspserver", + 2402: "taskmaster2000", + 2403: "taskmaster2000", + 2404: "iec-104", + 2405: "trc-netpoll", + 2406: "jediserver", + 2407: "orion", + 2409: "sns-protocol", + 2410: "vrts-registry", + 2411: "netwave-ap-mgmt", + 2412: "cdn", + 2413: "orion-rmi-reg", + 2414: "beeyond", + 2415: "codima-rtp", + 2416: "rmtserver", + 2417: "composit-server", + 2418: "cas", + 2419: "attachmate-s2s", + 2420: "dslremote-mgmt", + 2421: "g-talk", + 2422: "crmsbits", + 2423: "rnrp", + 2424: "kofax-svr", + 2425: "fjitsuappmgr", + 2426: "vcmp", + 2427: "mgcp-gateway", + 2428: "ott", + 2429: "ft-role", + 2430: "venus", + 2431: "venus-se", + 2432: "codasrv", + 2433: "codasrv-se", + 2434: "pxc-epmap", + 2435: "optilogic", + 2436: "topx", + 2437: "unicontrol", + 2438: "msp", + 2439: "sybasedbsynch", + 2440: "spearway", + 2441: "pvsw-inet", + 2442: "netangel", + 2443: "powerclientcsf", + 2444: "btpp2sectrans", + 2445: "dtn1", + 2446: "bues-service", + 2447: "ovwdb", + 2448: "hpppssvr", + 2449: "ratl", + 2450: "netadmin", + 2451: "netchat", + 2452: "snifferclient", + 2453: "madge-ltd", + 2454: "indx-dds", + 2455: "wago-io-system", + 2456: "altav-remmgt", + 2457: "rapido-ip", + 2458: "griffin", + 2459: "community", + 2460: "ms-theater", + 2461: "qadmifoper", + 2462: "qadmifevent", + 2463: "lsi-raid-mgmt", + 2464: "direcpc-si", + 2465: "lbm", + 2466: "lbf", + 2467: "high-criteria", + 2468: "qip-msgd", + 2469: "mti-tcs-comm", + 2470: "taskman-port", + 2471: "seaodbc", + 2472: "c3", + 2473: "aker-cdp", + 2474: "vitalanalysis", + 2475: "ace-server", + 2476: "ace-svr-prop", + 2477: "ssm-cvs", + 2478: "ssm-cssps", + 2479: "ssm-els", + 2480: "powerexchange", + 2481: "giop", + 2482: "giop-ssl", + 2483: "ttc", + 2484: "ttc-ssl", + 2485: "netobjects1", + 2486: "netobjects2", + 2487: "pns", + 2488: "moy-corp", + 2489: "tsilb", + 2490: "qip-qdhcp", + 2491: "conclave-cpp", + 2492: "groove", + 2493: "talarian-mqs", + 2494: "bmc-ar", + 2495: "fast-rem-serv", + 2496: "dirgis", + 2497: "quaddb", + 2498: "odn-castraq", + 2499: "unicontrol", + 2500: "rtsserv", + 2501: "rtsclient", + 2502: "kentrox-prot", + 2503: "nms-dpnss", + 2504: "wlbs", + 2505: "ppcontrol", + 2506: "jbroker", + 2507: "spock", + 2508: "jdatastore", + 2509: "fjmpss", + 2510: "fjappmgrbulk", + 2511: "metastorm", + 2512: "citrixima", + 2513: "citrixadmin", + 2514: "facsys-ntp", + 2515: "facsys-router", + 2516: "maincontrol", + 2517: "call-sig-trans", + 2518: "willy", + 2519: "globmsgsvc", + 2520: "pvsw", + 2521: "adaptecmgr", + 2522: "windb", + 2523: "qke-llc-v3", + 2524: "optiwave-lm", + 2525: "ms-v-worlds", + 2526: "ema-sent-lm", + 2527: "iqserver", + 2528: "ncr-ccl", + 2529: "utsftp", + 2530: "vrcommerce", + 2531: "ito-e-gui", + 2532: "ovtopmd", + 2533: "snifferserver", + 2534: "combox-web-acc", + 2535: "madcap", + 2536: "btpp2audctr1", + 2537: "upgrade", + 2538: "vnwk-prapi", + 2539: "vsiadmin", + 2540: "lonworks", + 2541: 
"lonworks2", + 2542: "udrawgraph", + 2543: "reftek", + 2544: "novell-zen", + 2545: "sis-emt", + 2546: "vytalvaultbrtp", + 2547: "vytalvaultvsmp", + 2548: "vytalvaultpipe", + 2549: "ipass", + 2550: "ads", + 2551: "isg-uda-server", + 2552: "call-logging", + 2553: "efidiningport", + 2554: "vcnet-link-v10", + 2555: "compaq-wcp", + 2556: "nicetec-nmsvc", + 2557: "nicetec-mgmt", + 2558: "pclemultimedia", + 2559: "lstp", + 2560: "labrat", + 2561: "mosaixcc", + 2562: "delibo", + 2563: "cti-redwood", + 2564: "hp-3000-telnet", + 2565: "coord-svr", + 2566: "pcs-pcw", + 2567: "clp", + 2568: "spamtrap", + 2569: "sonuscallsig", + 2570: "hs-port", + 2571: "cecsvc", + 2572: "ibp", + 2573: "trustestablish", + 2574: "blockade-bpsp", + 2575: "hl7", + 2576: "tclprodebugger", + 2577: "scipticslsrvr", + 2578: "rvs-isdn-dcp", + 2579: "mpfoncl", + 2580: "tributary", + 2581: "argis-te", + 2582: "argis-ds", + 2583: "mon", + 2584: "cyaserv", + 2585: "netx-server", + 2586: "netx-agent", + 2587: "masc", + 2588: "privilege", + 2589: "quartus-tcl", + 2590: "idotdist", + 2591: "maytagshuffle", + 2592: "netrek", + 2593: "mns-mail", + 2594: "dts", + 2595: "worldfusion1", + 2596: "worldfusion2", + 2597: "homesteadglory", + 2598: "citriximaclient", + 2599: "snapd", + 2600: "hpstgmgr", + 2601: "discp-client", + 2602: "discp-server", + 2603: "servicemeter", + 2604: "nsc-ccs", + 2605: "nsc-posa", + 2606: "netmon", + 2607: "connection", + 2608: "wag-service", + 2609: "system-monitor", + 2610: "versa-tek", + 2611: "lionhead", + 2612: "qpasa-agent", + 2613: "smntubootstrap", + 2614: "neveroffline", + 2615: "firepower", + 2616: "appswitch-emp", + 2617: "cmadmin", + 2618: "priority-e-com", + 2619: "bruce", + 2620: "lpsrecommender", + 2621: "miles-apart", + 2622: "metricadbc", + 2623: "lmdp", + 2624: "aria", + 2625: "blwnkl-port", + 2626: "gbjd816", + 2627: "moshebeeri", + 2628: "dict", + 2629: "sitaraserver", + 2630: "sitaramgmt", + 2631: "sitaradir", + 2632: "irdg-post", + 2633: "interintelli", + 2634: "pk-electronics", + 2635: "backburner", + 2636: "solve", + 2637: "imdocsvc", + 2638: "sybaseanywhere", + 2639: "aminet", + 2640: "ami-control", + 2641: "hdl-srv", + 2642: "tragic", + 2643: "gte-samp", + 2644: "travsoft-ipx-t", + 2645: "novell-ipx-cmd", + 2646: "and-lm", + 2647: "syncserver", + 2648: "upsnotifyprot", + 2649: "vpsipport", + 2650: "eristwoguns", + 2651: "ebinsite", + 2652: "interpathpanel", + 2653: "sonus", + 2654: "corel-vncadmin", + 2655: "unglue", + 2656: "kana", + 2657: "sns-dispatcher", + 2658: "sns-admin", + 2659: "sns-query", + 2660: "gcmonitor", + 2661: "olhost", + 2662: "bintec-capi", + 2663: "bintec-tapi", + 2664: "patrol-mq-gm", + 2665: "patrol-mq-nm", + 2666: "extensis", + 2667: "alarm-clock-s", + 2668: "alarm-clock-c", + 2669: "toad", + 2670: "tve-announce", + 2671: "newlixreg", + 2672: "nhserver", + 2673: "firstcall42", + 2674: "ewnn", + 2675: "ttc-etap", + 2676: "simslink", + 2677: "gadgetgate1way", + 2678: "gadgetgate2way", + 2679: "syncserverssl", + 2680: "pxc-sapxom", + 2681: "mpnjsomb", + 2683: "ncdloadbalance", + 2684: "mpnjsosv", + 2685: "mpnjsocl", + 2686: "mpnjsomg", + 2687: "pq-lic-mgmt", + 2688: "md-cg-http", + 2689: "fastlynx", + 2690: "hp-nnm-data", + 2691: "itinternet", + 2692: "admins-lms", + 2694: "pwrsevent", + 2695: "vspread", + 2696: "unifyadmin", + 2697: "oce-snmp-trap", + 2698: "mck-ivpip", + 2699: "csoft-plusclnt", + 2700: "tqdata", + 2701: "sms-rcinfo", + 2702: "sms-xfer", + 2703: "sms-chat", + 2704: "sms-remctrl", + 2705: "sds-admin", + 2706: "ncdmirroring", + 2707: "emcsymapiport", 
+ 2708: "banyan-net", + 2709: "supermon", + 2710: "sso-service", + 2711: "sso-control", + 2712: "aocp", + 2713: "raventbs", + 2714: "raventdm", + 2715: "hpstgmgr2", + 2716: "inova-ip-disco", + 2717: "pn-requester", + 2718: "pn-requester2", + 2719: "scan-change", + 2720: "wkars", + 2721: "smart-diagnose", + 2722: "proactivesrvr", + 2723: "watchdog-nt", + 2724: "qotps", + 2725: "msolap-ptp2", + 2726: "tams", + 2727: "mgcp-callagent", + 2728: "sqdr", + 2729: "tcim-control", + 2730: "nec-raidplus", + 2731: "fyre-messanger", + 2732: "g5m", + 2733: "signet-ctf", + 2734: "ccs-software", + 2735: "netiq-mc", + 2736: "radwiz-nms-srv", + 2737: "srp-feedback", + 2738: "ndl-tcp-ois-gw", + 2739: "tn-timing", + 2740: "alarm", + 2741: "tsb", + 2742: "tsb2", + 2743: "murx", + 2744: "honyaku", + 2745: "urbisnet", + 2746: "cpudpencap", + 2747: "fjippol-swrly", + 2748: "fjippol-polsvr", + 2749: "fjippol-cnsl", + 2750: "fjippol-port1", + 2751: "fjippol-port2", + 2752: "rsisysaccess", + 2753: "de-spot", + 2754: "apollo-cc", + 2755: "expresspay", + 2756: "simplement-tie", + 2757: "cnrp", + 2758: "apollo-status", + 2759: "apollo-gms", + 2760: "sabams", + 2761: "dicom-iscl", + 2762: "dicom-tls", + 2763: "desktop-dna", + 2764: "data-insurance", + 2765: "qip-audup", + 2766: "compaq-scp", + 2767: "uadtc", + 2768: "uacs", + 2769: "exce", + 2770: "veronica", + 2771: "vergencecm", + 2772: "auris", + 2773: "rbakcup1", + 2774: "rbakcup2", + 2775: "smpp", + 2776: "ridgeway1", + 2777: "ridgeway2", + 2778: "gwen-sonya", + 2779: "lbc-sync", + 2780: "lbc-control", + 2781: "whosells", + 2782: "everydayrc", + 2783: "aises", + 2784: "www-dev", + 2785: "aic-np", + 2786: "aic-oncrpc", + 2787: "piccolo", + 2788: "fryeserv", + 2789: "media-agent", + 2790: "plgproxy", + 2791: "mtport-regist", + 2792: "f5-globalsite", + 2793: "initlsmsad", + 2795: "livestats", + 2796: "ac-tech", + 2797: "esp-encap", + 2798: "tmesis-upshot", + 2799: "icon-discover", + 2800: "acc-raid", + 2801: "igcp", + 2802: "veritas-udp1", + 2803: "btprjctrl", + 2804: "dvr-esm", + 2805: "wta-wsp-s", + 2806: "cspuni", + 2807: "cspmulti", + 2808: "j-lan-p", + 2809: "corbaloc", + 2810: "netsteward", + 2811: "gsiftp", + 2812: "atmtcp", + 2813: "llm-pass", + 2814: "llm-csv", + 2815: "lbc-measure", + 2816: "lbc-watchdog", + 2817: "nmsigport", + 2818: "rmlnk", + 2819: "fc-faultnotify", + 2820: "univision", + 2821: "vrts-at-port", + 2822: "ka0wuc", + 2823: "cqg-netlan", + 2824: "cqg-netlan-1", + 2826: "slc-systemlog", + 2827: "slc-ctrlrloops", + 2828: "itm-lm", + 2829: "silkp1", + 2830: "silkp2", + 2831: "silkp3", + 2832: "silkp4", + 2833: "glishd", + 2834: "evtp", + 2835: "evtp-data", + 2836: "catalyst", + 2837: "repliweb", + 2838: "starbot", + 2839: "nmsigport", + 2840: "l3-exprt", + 2841: "l3-ranger", + 2842: "l3-hawk", + 2843: "pdnet", + 2844: "bpcp-poll", + 2845: "bpcp-trap", + 2846: "aimpp-hello", + 2847: "aimpp-port-req", + 2848: "amt-blc-port", + 2849: "fxp", + 2850: "metaconsole", + 2851: "webemshttp", + 2852: "bears-01", + 2853: "ispipes", + 2854: "infomover", + 2856: "cesdinv", + 2857: "simctlp", + 2858: "ecnp", + 2859: "activememory", + 2860: "dialpad-voice1", + 2861: "dialpad-voice2", + 2862: "ttg-protocol", + 2863: "sonardata", + 2864: "astromed-main", + 2865: "pit-vpn", + 2866: "iwlistener", + 2867: "esps-portal", + 2868: "npep-messaging", + 2869: "icslap", + 2870: "daishi", + 2871: "msi-selectplay", + 2872: "radix", + 2874: "dxmessagebase1", + 2875: "dxmessagebase2", + 2876: "sps-tunnel", + 2877: "bluelance", + 2878: "aap", + 2879: "ucentric-ds", + 2880: 
"synapse", + 2881: "ndsp", + 2882: "ndtp", + 2883: "ndnp", + 2884: "flashmsg", + 2885: "topflow", + 2886: "responselogic", + 2887: "aironetddp", + 2888: "spcsdlobby", + 2889: "rsom", + 2890: "cspclmulti", + 2891: "cinegrfx-elmd", + 2892: "snifferdata", + 2893: "vseconnector", + 2894: "abacus-remote", + 2895: "natuslink", + 2896: "ecovisiong6-1", + 2897: "citrix-rtmp", + 2898: "appliance-cfg", + 2899: "powergemplus", + 2900: "quicksuite", + 2901: "allstorcns", + 2902: "netaspi", + 2903: "suitcase", + 2904: "m2ua", + 2906: "caller9", + 2907: "webmethods-b2b", + 2908: "mao", + 2909: "funk-dialout", + 2910: "tdaccess", + 2911: "blockade", + 2912: "epicon", + 2913: "boosterware", + 2914: "gamelobby", + 2915: "tksocket", + 2916: "elvin-server", + 2917: "elvin-client", + 2918: "kastenchasepad", + 2919: "roboer", + 2920: "roboeda", + 2921: "cesdcdman", + 2922: "cesdcdtrn", + 2923: "wta-wsp-wtp-s", + 2924: "precise-vip", + 2926: "mobile-file-dl", + 2927: "unimobilectrl", + 2928: "redstone-cpss", + 2929: "amx-webadmin", + 2930: "amx-weblinx", + 2931: "circle-x", + 2932: "incp", + 2933: "4-tieropmgw", + 2934: "4-tieropmcli", + 2935: "qtp", + 2936: "otpatch", + 2937: "pnaconsult-lm", + 2938: "sm-pas-1", + 2939: "sm-pas-2", + 2940: "sm-pas-3", + 2941: "sm-pas-4", + 2942: "sm-pas-5", + 2943: "ttnrepository", + 2944: "megaco-h248", + 2945: "h248-binary", + 2946: "fjsvmpor", + 2947: "gpsd", + 2948: "wap-push", + 2949: "wap-pushsecure", + 2950: "esip", + 2951: "ottp", + 2952: "mpfwsas", + 2953: "ovalarmsrv", + 2954: "ovalarmsrv-cmd", + 2955: "csnotify", + 2956: "ovrimosdbman", + 2957: "jmact5", + 2958: "jmact6", + 2959: "rmopagt", + 2960: "dfoxserver", + 2961: "boldsoft-lm", + 2962: "iph-policy-cli", + 2963: "iph-policy-adm", + 2964: "bullant-srap", + 2965: "bullant-rap", + 2966: "idp-infotrieve", + 2967: "ssc-agent", + 2968: "enpp", + 2969: "essp", + 2970: "index-net", + 2971: "netclip", + 2972: "pmsm-webrctl", + 2973: "svnetworks", + 2974: "signal", + 2975: "fjmpcm", + 2976: "cns-srv-port", + 2977: "ttc-etap-ns", + 2978: "ttc-etap-ds", + 2979: "h263-video", + 2980: "wimd", + 2981: "mylxamport", + 2982: "iwb-whiteboard", + 2983: "netplan", + 2984: "hpidsadmin", + 2985: "hpidsagent", + 2986: "stonefalls", + 2987: "identify", + 2988: "hippad", + 2989: "zarkov", + 2990: "boscap", + 2991: "wkstn-mon", + 2992: "avenyo", + 2993: "veritas-vis1", + 2994: "veritas-vis2", + 2995: "idrs", + 2996: "vsixml", + 2997: "rebol", + 2998: "realsecure", + 2999: "remoteware-un", + 3000: "hbci", + 3002: "exlm-agent", + 3003: "cgms", + 3004: "csoftragent", + 3005: "geniuslm", + 3006: "ii-admin", + 3007: "lotusmtap", + 3008: "midnight-tech", + 3009: "pxc-ntfy", + 3010: "ping-pong", + 3011: "trusted-web", + 3012: "twsdss", + 3013: "gilatskysurfer", + 3014: "broker-service", + 3015: "nati-dstp", + 3016: "notify-srvr", + 3017: "event-listener", + 3018: "srvc-registry", + 3019: "resource-mgr", + 3020: "cifs", + 3021: "agriserver", + 3022: "csregagent", + 3023: "magicnotes", + 3024: "nds-sso", + 3025: "arepa-raft", + 3026: "agri-gateway", + 3027: "LiebDevMgmt-C", + 3028: "LiebDevMgmt-DM", + 3029: "LiebDevMgmt-A", + 3030: "arepa-cas", + 3031: "eppc", + 3032: "redwood-chat", + 3033: "pdb", + 3034: "osmosis-aeea", + 3035: "fjsv-gssagt", + 3036: "hagel-dump", + 3037: "hp-san-mgmt", + 3038: "santak-ups", + 3039: "cogitate", + 3040: "tomato-springs", + 3041: "di-traceware", + 3042: "journee", + 3043: "brp", + 3044: "epp", + 3045: "responsenet", + 3046: "di-ase", + 3047: "hlserver", + 3048: "pctrader", + 3049: "nsws", + 3050: "gds-db", + 
3051: "galaxy-server", + 3052: "apc-3052", + 3053: "dsom-server", + 3054: "amt-cnf-prot", + 3055: "policyserver", + 3056: "cdl-server", + 3057: "goahead-fldup", + 3058: "videobeans", + 3059: "qsoft", + 3060: "interserver", + 3061: "cautcpd", + 3062: "ncacn-ip-tcp", + 3063: "ncadg-ip-udp", + 3064: "rprt", + 3065: "slinterbase", + 3066: "netattachsdmp", + 3067: "fjhpjp", + 3068: "ls3bcast", + 3069: "ls3", + 3070: "mgxswitch", + 3072: "csd-monitor", + 3073: "vcrp", + 3074: "xbox", + 3075: "orbix-locator", + 3076: "orbix-config", + 3077: "orbix-loc-ssl", + 3078: "orbix-cfg-ssl", + 3079: "lv-frontpanel", + 3080: "stm-pproc", + 3081: "tl1-lv", + 3082: "tl1-raw", + 3083: "tl1-telnet", + 3084: "itm-mccs", + 3085: "pcihreq", + 3086: "jdl-dbkitchen", + 3087: "asoki-sma", + 3088: "xdtp", + 3089: "ptk-alink", + 3090: "stss", + 3091: "1ci-smcs", + 3093: "rapidmq-center", + 3094: "rapidmq-reg", + 3095: "panasas", + 3096: "ndl-aps", + 3098: "umm-port", + 3099: "chmd", + 3100: "opcon-xps", + 3101: "hp-pxpib", + 3102: "slslavemon", + 3103: "autocuesmi", + 3104: "autocuetime", + 3105: "cardbox", + 3106: "cardbox-http", + 3107: "business", + 3108: "geolocate", + 3109: "personnel", + 3110: "sim-control", + 3111: "wsynch", + 3112: "ksysguard", + 3113: "cs-auth-svr", + 3114: "ccmad", + 3115: "mctet-master", + 3116: "mctet-gateway", + 3117: "mctet-jserv", + 3118: "pkagent", + 3119: "d2000kernel", + 3120: "d2000webserver", + 3122: "vtr-emulator", + 3123: "edix", + 3124: "beacon-port", + 3125: "a13-an", + 3127: "ctx-bridge", + 3128: "ndl-aas", + 3129: "netport-id", + 3130: "icpv2", + 3131: "netbookmark", + 3132: "ms-rule-engine", + 3133: "prism-deploy", + 3134: "ecp", + 3135: "peerbook-port", + 3136: "grubd", + 3137: "rtnt-1", + 3138: "rtnt-2", + 3139: "incognitorv", + 3140: "ariliamulti", + 3141: "vmodem", + 3142: "rdc-wh-eos", + 3143: "seaview", + 3144: "tarantella", + 3145: "csi-lfap", + 3146: "bears-02", + 3147: "rfio", + 3148: "nm-game-admin", + 3149: "nm-game-server", + 3150: "nm-asses-admin", + 3151: "nm-assessor", + 3152: "feitianrockey", + 3153: "s8-client-port", + 3154: "ccmrmi", + 3155: "jpegmpeg", + 3156: "indura", + 3157: "e3consultants", + 3158: "stvp", + 3159: "navegaweb-port", + 3160: "tip-app-server", + 3161: "doc1lm", + 3162: "sflm", + 3163: "res-sap", + 3164: "imprs", + 3165: "newgenpay", + 3166: "sossecollector", + 3167: "nowcontact", + 3168: "poweronnud", + 3169: "serverview-as", + 3170: "serverview-asn", + 3171: "serverview-gf", + 3172: "serverview-rm", + 3173: "serverview-icc", + 3174: "armi-server", + 3175: "t1-e1-over-ip", + 3176: "ars-master", + 3177: "phonex-port", + 3178: "radclientport", + 3179: "h2gf-w-2m", + 3180: "mc-brk-srv", + 3181: "bmcpatrolagent", + 3182: "bmcpatrolrnvu", + 3183: "cops-tls", + 3184: "apogeex-port", + 3185: "smpppd", + 3186: "iiw-port", + 3187: "odi-port", + 3188: "brcm-comm-port", + 3189: "pcle-infex", + 3190: "csvr-proxy", + 3191: "csvr-sslproxy", + 3192: "firemonrcc", + 3193: "spandataport", + 3194: "magbind", + 3195: "ncu-1", + 3196: "ncu-2", + 3197: "embrace-dp-s", + 3198: "embrace-dp-c", + 3199: "dmod-workspace", + 3200: "tick-port", + 3201: "cpq-tasksmart", + 3202: "intraintra", + 3203: "netwatcher-mon", + 3204: "netwatcher-db", + 3205: "isns", + 3206: "ironmail", + 3207: "vx-auth-port", + 3208: "pfu-prcallback", + 3209: "netwkpathengine", + 3210: "flamenco-proxy", + 3211: "avsecuremgmt", + 3212: "surveyinst", + 3213: "neon24x7", + 3214: "jmq-daemon-1", + 3215: "jmq-daemon-2", + 3216: "ferrari-foam", + 3217: "unite", + 3218: "smartpackets", + 3219: 
"wms-messenger", + 3220: "xnm-ssl", + 3221: "xnm-clear-text", + 3222: "glbp", + 3223: "digivote", + 3224: "aes-discovery", + 3225: "fcip-port", + 3226: "isi-irp", + 3227: "dwnmshttp", + 3228: "dwmsgserver", + 3229: "global-cd-port", + 3230: "sftdst-port", + 3231: "vidigo", + 3232: "mdtp", + 3233: "whisker", + 3234: "alchemy", + 3235: "mdap-port", + 3236: "apparenet-ts", + 3237: "apparenet-tps", + 3238: "apparenet-as", + 3239: "apparenet-ui", + 3240: "triomotion", + 3241: "sysorb", + 3242: "sdp-id-port", + 3243: "timelot", + 3244: "onesaf", + 3245: "vieo-fe", + 3246: "dvt-system", + 3247: "dvt-data", + 3248: "procos-lm", + 3249: "ssp", + 3250: "hicp", + 3251: "sysscanner", + 3252: "dhe", + 3253: "pda-data", + 3254: "pda-sys", + 3255: "semaphore", + 3256: "cpqrpm-agent", + 3257: "cpqrpm-server", + 3258: "ivecon-port", + 3259: "epncdp2", + 3260: "iscsi-target", + 3261: "winshadow", + 3262: "necp", + 3263: "ecolor-imager", + 3264: "ccmail", + 3265: "altav-tunnel", + 3266: "ns-cfg-server", + 3267: "ibm-dial-out", + 3268: "msft-gc", + 3269: "msft-gc-ssl", + 3270: "verismart", + 3271: "csoft-prev", + 3272: "user-manager", + 3273: "sxmp", + 3274: "ordinox-server", + 3275: "samd", + 3276: "maxim-asics", + 3277: "awg-proxy", + 3278: "lkcmserver", + 3279: "admind", + 3280: "vs-server", + 3281: "sysopt", + 3282: "datusorb", + 3283: "Apple Remote Desktop (Net Assistant)", + 3284: "4talk", + 3285: "plato", + 3286: "e-net", + 3287: "directvdata", + 3288: "cops", + 3289: "enpc", + 3290: "caps-lm", + 3291: "sah-lm", + 3292: "cart-o-rama", + 3293: "fg-fps", + 3294: "fg-gip", + 3295: "dyniplookup", + 3296: "rib-slm", + 3297: "cytel-lm", + 3298: "deskview", + 3299: "pdrncs", + 3302: "mcs-fastmail", + 3303: "opsession-clnt", + 3304: "opsession-srvr", + 3305: "odette-ftp", + 3306: "mysql", + 3307: "opsession-prxy", + 3308: "tns-server", + 3309: "tns-adv", + 3310: "dyna-access", + 3311: "mcns-tel-ret", + 3312: "appman-server", + 3313: "uorb", + 3314: "uohost", + 3315: "cdid", + 3316: "aicc-cmi", + 3317: "vsaiport", + 3318: "ssrip", + 3319: "sdt-lmd", + 3320: "officelink2000", + 3321: "vnsstr", + 3326: "sftu", + 3327: "bbars", + 3328: "egptlm", + 3329: "hp-device-disc", + 3330: "mcs-calypsoicf", + 3331: "mcs-messaging", + 3332: "mcs-mailsvr", + 3333: "dec-notes", + 3334: "directv-web", + 3335: "directv-soft", + 3336: "directv-tick", + 3337: "directv-catlg", + 3338: "anet-b", + 3339: "anet-l", + 3340: "anet-m", + 3341: "anet-h", + 3342: "webtie", + 3343: "ms-cluster-net", + 3344: "bnt-manager", + 3345: "influence", + 3346: "trnsprntproxy", + 3347: "phoenix-rpc", + 3348: "pangolin-laser", + 3349: "chevinservices", + 3350: "findviatv", + 3351: "btrieve", + 3352: "ssql", + 3353: "fatpipe", + 3354: "suitjd", + 3355: "ordinox-dbase", + 3356: "upnotifyps", + 3357: "adtech-test", + 3358: "mpsysrmsvr", + 3359: "wg-netforce", + 3360: "kv-server", + 3361: "kv-agent", + 3362: "dj-ilm", + 3363: "nati-vi-server", + 3364: "creativeserver", + 3365: "contentserver", + 3366: "creativepartnr", + 3372: "tip2", + 3373: "lavenir-lm", + 3374: "cluster-disc", + 3375: "vsnm-agent", + 3376: "cdbroker", + 3377: "cogsys-lm", + 3378: "wsicopy", + 3379: "socorfs", + 3380: "sns-channels", + 3381: "geneous", + 3382: "fujitsu-neat", + 3383: "esp-lm", + 3384: "hp-clic", + 3385: "qnxnetman", + 3386: "gprs-sig", + 3387: "backroomnet", + 3388: "cbserver", + 3389: "ms-wbt-server", + 3390: "dsc", + 3391: "savant", + 3392: "efi-lm", + 3393: "d2k-tapestry1", + 3394: "d2k-tapestry2", + 3395: "dyna-lm", + 3396: "printer-agent", + 3397: "cloanto-lm", + 
3398: "mercantile", + 3399: "csms", + 3400: "csms2", + 3401: "filecast", + 3402: "fxaengine-net", + 3405: "nokia-ann-ch1", + 3406: "nokia-ann-ch2", + 3407: "ldap-admin", + 3408: "BESApi", + 3409: "networklens", + 3410: "networklenss", + 3411: "biolink-auth", + 3412: "xmlblaster", + 3413: "svnet", + 3414: "wip-port", + 3415: "bcinameservice", + 3416: "commandport", + 3417: "csvr", + 3418: "rnmap", + 3419: "softaudit", + 3420: "ifcp-port", + 3421: "bmap", + 3422: "rusb-sys-port", + 3423: "xtrm", + 3424: "xtrms", + 3425: "agps-port", + 3426: "arkivio", + 3427: "websphere-snmp", + 3428: "twcss", + 3429: "gcsp", + 3430: "ssdispatch", + 3431: "ndl-als", + 3432: "osdcp", + 3433: "opnet-smp", + 3434: "opencm", + 3435: "pacom", + 3436: "gc-config", + 3437: "autocueds", + 3438: "spiral-admin", + 3439: "hri-port", + 3440: "ans-console", + 3441: "connect-client", + 3442: "connect-server", + 3443: "ov-nnm-websrv", + 3444: "denali-server", + 3445: "monp", + 3446: "3comfaxrpc", + 3447: "directnet", + 3448: "dnc-port", + 3449: "hotu-chat", + 3450: "castorproxy", + 3451: "asam", + 3452: "sabp-signal", + 3453: "pscupd", + 3454: "mira", + 3455: "prsvp", + 3456: "vat", + 3457: "vat-control", + 3458: "d3winosfi", + 3459: "integral", + 3460: "edm-manager", + 3461: "edm-stager", + 3462: "edm-std-notify", + 3463: "edm-adm-notify", + 3464: "edm-mgr-sync", + 3465: "edm-mgr-cntrl", + 3466: "workflow", + 3467: "rcst", + 3468: "ttcmremotectrl", + 3469: "pluribus", + 3470: "jt400", + 3471: "jt400-ssl", + 3472: "jaugsremotec-1", + 3473: "jaugsremotec-2", + 3474: "ttntspauto", + 3475: "genisar-port", + 3476: "nppmp", + 3477: "ecomm", + 3478: "stun", + 3479: "twrpc", + 3480: "plethora", + 3481: "cleanerliverc", + 3482: "vulture", + 3483: "slim-devices", + 3484: "gbs-stp", + 3485: "celatalk", + 3486: "ifsf-hb-port", + 3487: "ltcudp", + 3488: "fs-rh-srv", + 3489: "dtp-dia", + 3490: "colubris", + 3491: "swr-port", + 3492: "tvdumtray-port", + 3493: "nut", + 3494: "ibm3494", + 3495: "seclayer-tcp", + 3496: "seclayer-tls", + 3497: "ipether232port", + 3498: "dashpas-port", + 3499: "sccip-media", + 3500: "rtmp-port", + 3501: "isoft-p2p", + 3502: "avinstalldisc", + 3503: "lsp-ping", + 3504: "ironstorm", + 3505: "ccmcomm", + 3506: "apc-3506", + 3507: "nesh-broker", + 3508: "interactionweb", + 3509: "vt-ssl", + 3510: "xss-port", + 3511: "webmail-2", + 3512: "aztec", + 3513: "arcpd", + 3514: "must-p2p", + 3515: "must-backplane", + 3516: "smartcard-port", + 3517: "802-11-iapp", + 3518: "artifact-msg", + 3519: "galileo", + 3520: "galileolog", + 3521: "mc3ss", + 3522: "nssocketport", + 3523: "odeumservlink", + 3524: "ecmport", + 3525: "eisport", + 3526: "starquiz-port", + 3527: "beserver-msg-q", + 3528: "jboss-iiop", + 3529: "jboss-iiop-ssl", + 3530: "gf", + 3531: "joltid", + 3532: "raven-rmp", + 3533: "raven-rdp", + 3534: "urld-port", + 3535: "ms-la", + 3536: "snac", + 3537: "ni-visa-remote", + 3538: "ibm-diradm", + 3539: "ibm-diradm-ssl", + 3540: "pnrp-port", + 3541: "voispeed-port", + 3542: "hacl-monitor", + 3543: "qftest-lookup", + 3544: "teredo", + 3545: "camac", + 3547: "symantec-sim", + 3548: "interworld", + 3549: "tellumat-nms", + 3550: "ssmpp", + 3551: "apcupsd", + 3552: "taserver", + 3553: "rbr-discovery", + 3554: "questnotify", + 3555: "razor", + 3556: "sky-transport", + 3557: "personalos-001", + 3558: "mcp-port", + 3559: "cctv-port", + 3560: "iniserve-port", + 3561: "bmc-onekey", + 3562: "sdbproxy", + 3563: "watcomdebug", + 3564: "esimport", + 3567: "dof-eps", + 3568: "dof-tunnel-sec", + 3569: "mbg-ctrl", + 3570: 
"mccwebsvr-port", + 3571: "megardsvr-port", + 3572: "megaregsvrport", + 3573: "tag-ups-1", + 3574: "dmaf-caster", + 3575: "ccm-port", + 3576: "cmc-port", + 3577: "config-port", + 3578: "data-port", + 3579: "ttat3lb", + 3580: "nati-svrloc", + 3581: "kfxaclicensing", + 3582: "press", + 3583: "canex-watch", + 3584: "u-dbap", + 3585: "emprise-lls", + 3586: "emprise-lsc", + 3587: "p2pgroup", + 3588: "sentinel", + 3589: "isomair", + 3590: "wv-csp-sms", + 3591: "gtrack-server", + 3592: "gtrack-ne", + 3593: "bpmd", + 3594: "mediaspace", + 3595: "shareapp", + 3596: "iw-mmogame", + 3597: "a14", + 3598: "a15", + 3599: "quasar-server", + 3600: "trap-daemon", + 3601: "visinet-gui", + 3602: "infiniswitchcl", + 3603: "int-rcv-cntrl", + 3604: "bmc-jmx-port", + 3605: "comcam-io", + 3606: "splitlock", + 3607: "precise-i3", + 3608: "trendchip-dcp", + 3609: "cpdi-pidas-cm", + 3610: "echonet", + 3611: "six-degrees", + 3612: "hp-dataprotect", + 3613: "alaris-disc", + 3614: "sigma-port", + 3615: "start-network", + 3616: "cd3o-protocol", + 3617: "sharp-server", + 3618: "aairnet-1", + 3619: "aairnet-2", + 3620: "ep-pcp", + 3621: "ep-nsp", + 3622: "ff-lr-port", + 3623: "haipe-discover", + 3624: "dist-upgrade", + 3625: "volley", + 3626: "bvcdaemon-port", + 3627: "jamserverport", + 3628: "ept-machine", + 3629: "escvpnet", + 3630: "cs-remote-db", + 3631: "cs-services", + 3632: "distcc", + 3633: "wacp", + 3634: "hlibmgr", + 3635: "sdo", + 3636: "servistaitsm", + 3637: "scservp", + 3638: "ehp-backup", + 3639: "xap-ha", + 3640: "netplay-port1", + 3641: "netplay-port2", + 3642: "juxml-port", + 3643: "audiojuggler", + 3644: "ssowatch", + 3645: "cyc", + 3646: "xss-srv-port", + 3647: "splitlock-gw", + 3648: "fjcp", + 3649: "nmmp", + 3650: "prismiq-plugin", + 3651: "xrpc-registry", + 3652: "vxcrnbuport", + 3653: "tsp", + 3654: "vaprtm", + 3655: "abatemgr", + 3656: "abatjss", + 3657: "immedianet-bcn", + 3658: "ps-ams", + 3659: "apple-sasl", + 3660: "can-nds-ssl", + 3661: "can-ferret-ssl", + 3662: "pserver", + 3663: "dtp", + 3664: "ups-engine", + 3665: "ent-engine", + 3666: "eserver-pap", + 3667: "infoexch", + 3668: "dell-rm-port", + 3669: "casanswmgmt", + 3670: "smile", + 3671: "efcp", + 3672: "lispworks-orb", + 3673: "mediavault-gui", + 3674: "wininstall-ipc", + 3675: "calltrax", + 3676: "va-pacbase", + 3677: "roverlog", + 3678: "ipr-dglt", + 3679: "Escale (Newton Dock)", + 3680: "npds-tracker", + 3681: "bts-x73", + 3682: "cas-mapi", + 3683: "bmc-ea", + 3684: "faxstfx-port", + 3685: "dsx-agent", + 3686: "tnmpv2", + 3687: "simple-push", + 3688: "simple-push-s", + 3689: "daap", + 3690: "svn", + 3691: "magaya-network", + 3692: "intelsync", + 3695: "bmc-data-coll", + 3696: "telnetcpcd", + 3697: "nw-license", + 3698: "sagectlpanel", + 3699: "kpn-icw", + 3700: "lrs-paging", + 3701: "netcelera", + 3702: "ws-discovery", + 3703: "adobeserver-3", + 3704: "adobeserver-4", + 3705: "adobeserver-5", + 3706: "rt-event", + 3707: "rt-event-s", + 3708: "sun-as-iiops", + 3709: "ca-idms", + 3710: "portgate-auth", + 3711: "edb-server2", + 3712: "sentinel-ent", + 3713: "tftps", + 3714: "delos-dms", + 3715: "anoto-rendezv", + 3716: "wv-csp-sms-cir", + 3717: "wv-csp-udp-cir", + 3718: "opus-services", + 3719: "itelserverport", + 3720: "ufastro-instr", + 3721: "xsync", + 3722: "xserveraid", + 3723: "sychrond", + 3724: "blizwow", + 3725: "na-er-tip", + 3726: "array-manager", + 3727: "e-mdu", + 3728: "e-woa", + 3729: "fksp-audit", + 3730: "client-ctrl", + 3731: "smap", + 3732: "m-wnn", + 3733: "multip-msg", + 3734: "synel-data", + 3735: "pwdis", + 3736: 
"rs-rmi", + 3738: "versatalk", + 3739: "launchbird-lm", + 3740: "heartbeat", + 3741: "wysdma", + 3742: "cst-port", + 3743: "ipcs-command", + 3744: "sasg", + 3745: "gw-call-port", + 3746: "linktest", + 3747: "linktest-s", + 3748: "webdata", + 3749: "cimtrak", + 3750: "cbos-ip-port", + 3751: "gprs-cube", + 3752: "vipremoteagent", + 3753: "nattyserver", + 3754: "timestenbroker", + 3755: "sas-remote-hlp", + 3756: "canon-capt", + 3757: "grf-port", + 3758: "apw-registry", + 3759: "exapt-lmgr", + 3760: "adtempusclient", + 3761: "gsakmp", + 3762: "gbs-smp", + 3763: "xo-wave", + 3764: "mni-prot-rout", + 3765: "rtraceroute", + 3767: "listmgr-port", + 3768: "rblcheckd", + 3769: "haipe-otnk", + 3770: "cindycollab", + 3771: "paging-port", + 3772: "ctp", + 3773: "ctdhercules", + 3774: "zicom", + 3775: "ispmmgr", + 3776: "dvcprov-port", + 3777: "jibe-eb", + 3778: "c-h-it-port", + 3779: "cognima", + 3780: "nnp", + 3781: "abcvoice-port", + 3782: "iso-tp0s", + 3783: "bim-pem", + 3784: "bfd-control", + 3785: "bfd-echo", + 3786: "upstriggervsw", + 3787: "fintrx", + 3788: "isrp-port", + 3789: "remotedeploy", + 3790: "quickbooksrds", + 3791: "tvnetworkvideo", + 3792: "sitewatch", + 3793: "dcsoftware", + 3794: "jaus", + 3795: "myblast", + 3796: "spw-dialer", + 3797: "idps", + 3798: "minilock", + 3799: "radius-dynauth", + 3800: "pwgpsi", + 3801: "ibm-mgr", + 3802: "vhd", + 3803: "soniqsync", + 3804: "iqnet-port", + 3805: "tcpdataserver", + 3806: "wsmlb", + 3807: "spugna", + 3808: "sun-as-iiops-ca", + 3809: "apocd", + 3810: "wlanauth", + 3811: "amp", + 3812: "neto-wol-server", + 3813: "rap-ip", + 3814: "neto-dcs", + 3815: "lansurveyorxml", + 3816: "sunlps-http", + 3817: "tapeware", + 3818: "crinis-hb", + 3819: "epl-slp", + 3820: "scp", + 3821: "pmcp", + 3822: "acp-discovery", + 3823: "acp-conduit", + 3824: "acp-policy", + 3825: "ffserver", + 3826: "warmux", + 3827: "netmpi", + 3828: "neteh", + 3829: "neteh-ext", + 3830: "cernsysmgmtagt", + 3831: "dvapps", + 3832: "xxnetserver", + 3833: "aipn-auth", + 3834: "spectardata", + 3835: "spectardb", + 3836: "markem-dcp", + 3837: "mkm-discovery", + 3838: "sos", + 3839: "amx-rms", + 3840: "flirtmitmir", + 3842: "nhci", + 3843: "quest-agent", + 3844: "rnm", + 3845: "v-one-spp", + 3846: "an-pcp", + 3847: "msfw-control", + 3848: "item", + 3849: "spw-dnspreload", + 3850: "qtms-bootstrap", + 3851: "spectraport", + 3852: "sse-app-config", + 3853: "sscan", + 3854: "stryker-com", + 3855: "opentrac", + 3856: "informer", + 3857: "trap-port", + 3858: "trap-port-mom", + 3859: "nav-port", + 3860: "sasp", + 3861: "winshadow-hd", + 3862: "giga-pocket", + 3863: "asap-udp", + 3865: "xpl", + 3866: "dzdaemon", + 3867: "dzoglserver", + 3869: "ovsam-mgmt", + 3870: "ovsam-d-agent", + 3871: "avocent-adsap", + 3872: "oem-agent", + 3873: "fagordnc", + 3874: "sixxsconfig", + 3875: "pnbscada", + 3876: "dl-agent", + 3877: "xmpcr-interface", + 3878: "fotogcad", + 3879: "appss-lm", + 3880: "igrs", + 3881: "idac", + 3882: "msdts1", + 3883: "vrpn", + 3884: "softrack-meter", + 3885: "topflow-ssl", + 3886: "nei-management", + 3887: "ciphire-data", + 3888: "ciphire-serv", + 3889: "dandv-tester", + 3890: "ndsconnect", + 3891: "rtc-pm-port", + 3892: "pcc-image-port", + 3893: "cgi-starapi", + 3894: "syam-agent", + 3895: "syam-smc", + 3896: "sdo-tls", + 3897: "sdo-ssh", + 3898: "senip", + 3899: "itv-control", + 3900: "udt-os", + 3901: "nimsh", + 3902: "nimaux", + 3903: "charsetmgr", + 3904: "omnilink-port", + 3905: "mupdate", + 3906: "topovista-data", + 3907: "imoguia-port", + 3908: "hppronetman", + 3909: 
"surfcontrolcpa", + 3910: "prnrequest", + 3911: "prnstatus", + 3912: "gbmt-stars", + 3913: "listcrt-port", + 3914: "listcrt-port-2", + 3915: "agcat", + 3916: "wysdmc", + 3917: "aftmux", + 3918: "pktcablemmcops", + 3919: "hyperip", + 3920: "exasoftport1", + 3921: "herodotus-net", + 3922: "sor-update", + 3923: "symb-sb-port", + 3924: "mpl-gprs-port", + 3925: "zmp", + 3926: "winport", + 3927: "natdataservice", + 3928: "netboot-pxe", + 3929: "smauth-port", + 3930: "syam-webserver", + 3931: "msr-plugin-port", + 3932: "dyn-site", + 3933: "plbserve-port", + 3934: "sunfm-port", + 3935: "sdp-portmapper", + 3936: "mailprox", + 3937: "dvbservdsc", + 3938: "dbcontrol-agent", + 3939: "aamp", + 3940: "xecp-node", + 3941: "homeportal-web", + 3942: "srdp", + 3943: "tig", + 3944: "sops", + 3945: "emcads", + 3946: "backupedge", + 3947: "ccp", + 3948: "apdap", + 3949: "drip", + 3950: "namemunge", + 3951: "pwgippfax", + 3952: "i3-sessionmgr", + 3953: "xmlink-connect", + 3954: "adrep", + 3955: "p2pcommunity", + 3956: "gvcp", + 3957: "mqe-broker", + 3958: "mqe-agent", + 3959: "treehopper", + 3960: "bess", + 3961: "proaxess", + 3962: "sbi-agent", + 3963: "thrp", + 3964: "sasggprs", + 3965: "ati-ip-to-ncpe", + 3966: "bflckmgr", + 3967: "ppsms", + 3968: "ianywhere-dbns", + 3969: "landmarks", + 3970: "lanrevagent", + 3971: "lanrevserver", + 3972: "iconp", + 3973: "progistics", + 3974: "citysearch", + 3975: "airshot", + 3976: "opswagent", + 3977: "opswmanager", + 3978: "secure-cfg-svr", + 3979: "smwan", + 3980: "acms", + 3981: "starfish", + 3982: "eis", + 3983: "eisp", + 3984: "mapper-nodemgr", + 3985: "mapper-mapethd", + 3986: "mapper-ws-ethd", + 3987: "centerline", + 3988: "dcs-config", + 3989: "bv-queryengine", + 3990: "bv-is", + 3991: "bv-smcsrv", + 3992: "bv-ds", + 3993: "bv-agent", + 3995: "iss-mgmt-ssl", + 3996: "abcsoftware", + 3997: "agentsease-db", + 3998: "dnx", + 3999: "nvcnet", + 4000: "terabase", + 4001: "newoak", + 4002: "pxc-spvr-ft", + 4003: "pxc-splr-ft", + 4004: "pxc-roid", + 4005: "pxc-pin", + 4006: "pxc-spvr", + 4007: "pxc-splr", + 4008: "netcheque", + 4009: "chimera-hwm", + 4010: "samsung-unidex", + 4011: "altserviceboot", + 4012: "pda-gate", + 4013: "acl-manager", + 4014: "taiclock", + 4015: "talarian-mcast1", + 4016: "talarian-mcast2", + 4017: "talarian-mcast3", + 4018: "talarian-mcast4", + 4019: "talarian-mcast5", + 4020: "trap", + 4021: "nexus-portal", + 4022: "dnox", + 4023: "esnm-zoning", + 4024: "tnp1-port", + 4025: "partimage", + 4026: "as-debug", + 4027: "bxp", + 4028: "dtserver-port", + 4029: "ip-qsig", + 4030: "jdmn-port", + 4031: "suucp", + 4032: "vrts-auth-port", + 4033: "sanavigator", + 4034: "ubxd", + 4035: "wap-push-http", + 4036: "wap-push-https", + 4037: "ravehd", + 4038: "fazzt-ptp", + 4039: "fazzt-admin", + 4040: "yo-main", + 4041: "houston", + 4042: "ldxp", + 4043: "nirp", + 4044: "ltp", + 4045: "npp", + 4046: "acp-proto", + 4047: "ctp-state", + 4049: "wafs", + 4050: "cisco-wafs", + 4051: "cppdp", + 4052: "interact", + 4053: "ccu-comm-1", + 4054: "ccu-comm-2", + 4055: "ccu-comm-3", + 4056: "lms", + 4057: "wfm", + 4058: "kingfisher", + 4059: "dlms-cosem", + 4060: "dsmeter-iatc", + 4061: "ice-location", + 4062: "ice-slocation", + 4063: "ice-router", + 4064: "ice-srouter", + 4065: "avanti-cdp", + 4066: "pmas", + 4067: "idp", + 4068: "ipfltbcst", + 4069: "minger", + 4070: "tripe", + 4071: "aibkup", + 4072: "zieto-sock", + 4073: "iRAPP", + 4074: "cequint-cityid", + 4075: "perimlan", + 4076: "seraph", + 4077: "ascomalarm", + 4079: "santools", + 4080: "lorica-in", + 4081: 
"lorica-in-sec", + 4082: "lorica-out", + 4083: "lorica-out-sec", + 4084: "fortisphere-vm", + 4086: "ftsync", + 4089: "opencore", + 4090: "omasgport", + 4091: "ewinstaller", + 4092: "ewdgs", + 4093: "pvxpluscs", + 4094: "sysrqd", + 4095: "xtgui", + 4096: "bre", + 4097: "patrolview", + 4098: "drmsfsd", + 4099: "dpcp", + 4100: "igo-incognito", + 4101: "brlp-0", + 4102: "brlp-1", + 4103: "brlp-2", + 4104: "brlp-3", + 4105: "shofar", + 4106: "synchronite", + 4107: "j-ac", + 4108: "accel", + 4109: "izm", + 4110: "g2tag", + 4111: "xgrid", + 4112: "apple-vpns-rp", + 4113: "aipn-reg", + 4114: "jomamqmonitor", + 4115: "cds", + 4116: "smartcard-tls", + 4117: "hillrserv", + 4118: "netscript", + 4119: "assuria-slm", + 4121: "e-builder", + 4122: "fprams", + 4123: "z-wave", + 4124: "tigv2", + 4125: "opsview-envoy", + 4126: "ddrepl", + 4127: "unikeypro", + 4128: "nufw", + 4129: "nuauth", + 4130: "fronet", + 4131: "stars", + 4132: "nuts-dem", + 4133: "nuts-bootp", + 4134: "nifty-hmi", + 4135: "cl-db-attach", + 4136: "cl-db-request", + 4137: "cl-db-remote", + 4138: "nettest", + 4139: "thrtx", + 4140: "cedros-fds", + 4141: "oirtgsvc", + 4142: "oidocsvc", + 4143: "oidsr", + 4145: "vvr-control", + 4146: "tgcconnect", + 4147: "vrxpservman", + 4148: "hhb-handheld", + 4149: "agslb", + 4150: "PowerAlert-nsa", + 4151: "menandmice-noh", + 4152: "idig-mux", + 4153: "mbl-battd", + 4154: "atlinks", + 4155: "bzr", + 4156: "stat-results", + 4157: "stat-scanner", + 4158: "stat-cc", + 4159: "nss", + 4160: "jini-discovery", + 4161: "omscontact", + 4162: "omstopology", + 4163: "silverpeakpeer", + 4164: "silverpeakcomm", + 4165: "altcp", + 4166: "joost", + 4167: "ddgn", + 4168: "pslicser", + 4169: "iadt-disc", + 4172: "pcoip", + 4173: "mma-discovery", + 4174: "sm-disc", + 4177: "wello", + 4178: "storman", + 4179: "MaxumSP", + 4180: "httpx", + 4181: "macbak", + 4182: "pcptcpservice", + 4183: "cyborgnet", + 4184: "universe-suite", + 4185: "wcpp", + 4188: "vatata", + 4191: "dsmipv6", + 4192: "azeti-bd", + 4197: "hctl", + 4199: "eims-admin", + 4300: "corelccam", + 4301: "d-data", + 4302: "d-data-control", + 4303: "srcp", + 4304: "owserver", + 4305: "batman", + 4306: "pinghgl", + 4307: "trueconf", + 4308: "compx-lockview", + 4309: "dserver", + 4310: "mirrtex", + 4320: "fdt-rcatp", + 4321: "rwhois", + 4322: "trim-event", + 4323: "trim-ice", + 4325: "geognosisman", + 4326: "geognosis", + 4327: "jaxer-web", + 4328: "jaxer-manager", + 4333: "ahsp", + 4340: "gaia", + 4341: "lisp-data", + 4342: "lisp-control", + 4343: "unicall", + 4344: "vinainstall", + 4345: "m4-network-as", + 4346: "elanlm", + 4347: "lansurveyor", + 4348: "itose", + 4349: "fsportmap", + 4350: "net-device", + 4351: "plcy-net-svcs", + 4352: "pjlink", + 4353: "f5-iquery", + 4354: "qsnet-trans", + 4355: "qsnet-workst", + 4356: "qsnet-assist", + 4357: "qsnet-cond", + 4358: "qsnet-nucl", + 4359: "omabcastltkm", + 4361: "nacnl", + 4362: "afore-vdp-disc", + 4366: "shadowstream", + 4368: "wxbrief", + 4369: "epmd", + 4370: "elpro-tunnel", + 4371: "l2c-disc", + 4372: "l2c-data", + 4373: "remctl", + 4375: "tolteces", + 4376: "bip", + 4377: "cp-spxsvr", + 4378: "cp-spxdpy", + 4379: "ctdb", + 4389: "xandros-cms", + 4390: "wiegand", + 4394: "apwi-disc", + 4395: "omnivisionesx", + 4400: "ds-srv", + 4401: "ds-srvr", + 4402: "ds-clnt", + 4403: "ds-user", + 4404: "ds-admin", + 4405: "ds-mail", + 4406: "ds-slp", + 4412: "smallchat", + 4413: "avi-nms-disc", + 4416: "pjj-player-disc", + 4418: "axysbridge", + 4420: "nvm-express", + 4425: "netrockey6", + 4426: "beacon-port-2", + 4430: 
"rsqlserver", + 4432: "l-acoustics", + 4441: "netblox", + 4442: "saris", + 4443: "pharos", + 4444: "krb524", + 4445: "upnotifyp", + 4446: "n1-fwp", + 4447: "n1-rmgmt", + 4448: "asc-slmd", + 4449: "privatewire", + 4450: "camp", + 4451: "ctisystemmsg", + 4452: "ctiprogramload", + 4453: "nssalertmgr", + 4454: "nssagentmgr", + 4455: "prchat-user", + 4456: "prchat-server", + 4457: "prRegister", + 4458: "mcp", + 4484: "hpssmgmt", + 4486: "icms", + 4488: "awacs-ice", + 4500: "ipsec-nat-t", + 4534: "armagetronad", + 4535: "ehs", + 4536: "ehs-ssl", + 4537: "wssauthsvc", + 4538: "swx-gate", + 4545: "worldscores", + 4546: "sf-lm", + 4547: "lanner-lm", + 4548: "synchromesh", + 4549: "aegate", + 4550: "gds-adppiw-db", + 4551: "ieee-mih", + 4552: "menandmice-mon", + 4554: "msfrs", + 4555: "rsip", + 4556: "dtn-bundle", + 4557: "mtcevrunqss", + 4558: "mtcevrunqman", + 4559: "hylafax", + 4566: "kwtc", + 4567: "tram", + 4568: "bmc-reporting", + 4569: "iax", + 4591: "l3t-at-an", + 4592: "hrpd-ith-at-an", + 4593: "ipt-anri-anri", + 4594: "ias-session", + 4595: "ias-paging", + 4596: "ias-neighbor", + 4597: "a21-an-1xbs", + 4598: "a16-an-an", + 4599: "a17-an-an", + 4600: "piranha1", + 4601: "piranha2", + 4621: "ventoso", + 4658: "playsta2-app", + 4659: "playsta2-lob", + 4660: "smaclmgr", + 4661: "kar2ouche", + 4662: "oms", + 4663: "noteit", + 4664: "ems", + 4665: "contclientms", + 4666: "eportcomm", + 4667: "mmacomm", + 4668: "mmaeds", + 4669: "eportcommdata", + 4670: "light", + 4671: "acter", + 4672: "rfa", + 4673: "cxws", + 4674: "appiq-mgmt", + 4675: "dhct-status", + 4676: "dhct-alerts", + 4677: "bcs", + 4678: "traversal", + 4679: "mgesupervision", + 4680: "mgemanagement", + 4681: "parliant", + 4682: "finisar", + 4683: "spike", + 4684: "rfid-rp1", + 4685: "autopac", + 4686: "msp-os", + 4687: "nst", + 4688: "mobile-p2p", + 4689: "altovacentral", + 4690: "prelude", + 4691: "mtn", + 4692: "conspiracy", + 4700: "netxms-agent", + 4701: "netxms-mgmt", + 4702: "netxms-sync", + 4711: "trinity-dist", + 4725: "truckstar", + 4726: "a26-fap-fgw", + 4727: "fcis-disc", + 4728: "capmux", + 4729: "gsmtap", + 4730: "gearman", + 4732: "ohmtrigger", + 4737: "ipdr-sp", + 4738: "solera-lpn", + 4739: "ipfix", + 4740: "ipfixs", + 4741: "lumimgrd", + 4742: "sicct-sdp", + 4743: "openhpid", + 4744: "ifsp", + 4745: "fmp", + 4746: "intelliadm-disc", + 4747: "buschtrommel", + 4749: "profilemac", + 4750: "ssad", + 4751: "spocp", + 4752: "snap", + 4753: "simon-disc", + 4754: "gre-in-udp", + 4755: "gre-udp-dtls", + 4784: "bfd-multi-ctl", + 4785: "cncp", + 4789: "vxlan", + 4790: "vxlan-gpe", + 4791: "roce", + 4800: "iims", + 4801: "iwec", + 4802: "ilss", + 4803: "notateit-disc", + 4804: "aja-ntv4-disc", + 4827: "htcp", + 4837: "varadero-0", + 4838: "varadero-1", + 4839: "varadero-2", + 4840: "opcua-udp", + 4841: "quosa", + 4842: "gw-asv", + 4843: "opcua-tls", + 4844: "gw-log", + 4845: "wcr-remlib", + 4846: "contamac-icm", + 4847: "wfc", + 4848: "appserv-http", + 4849: "appserv-https", + 4850: "sun-as-nodeagt", + 4851: "derby-repli", + 4867: "unify-debug", + 4868: "phrelay", + 4869: "phrelaydbg", + 4870: "cc-tracking", + 4871: "wired", + 4876: "tritium-can", + 4877: "lmcs", + 4878: "inst-discovery", + 4881: "socp-t", + 4882: "socp-c", + 4884: "hivestor", + 4885: "abbs", + 4894: "lyskom", + 4899: "radmin-port", + 4900: "hfcs", + 4914: "bones", + 4936: "an-signaling", + 4937: "atsc-mh-ssc", + 4940: "eq-office-4940", + 4941: "eq-office-4941", + 4942: "eq-office-4942", + 4949: "munin", + 4950: "sybasesrvmon", + 4951: "pwgwims", + 4952: 
"sagxtsds", + 4969: "ccss-qmm", + 4970: "ccss-qsm", + 4980: "ctxs-vpp", + 4986: "mrip", + 4987: "smar-se-port1", + 4988: "smar-se-port2", + 4989: "parallel", + 4990: "busycal", + 4991: "vrt", + 4999: "hfcs-manager", + 5000: "commplex-main", + 5001: "commplex-link", + 5002: "rfe", + 5003: "fmpro-internal", + 5004: "avt-profile-1", + 5005: "avt-profile-2", + 5006: "wsm-server", + 5007: "wsm-server-ssl", + 5008: "synapsis-edge", + 5009: "winfs", + 5010: "telelpathstart", + 5011: "telelpathattack", + 5012: "nsp", + 5013: "fmpro-v6", + 5014: "onpsocket", + 5020: "zenginkyo-1", + 5021: "zenginkyo-2", + 5022: "mice", + 5023: "htuilsrv", + 5024: "scpi-telnet", + 5025: "scpi-raw", + 5026: "strexec-d", + 5027: "strexec-s", + 5029: "infobright", + 5030: "surfpass", + 5031: "dmp", + 5042: "asnaacceler8db", + 5043: "swxadmin", + 5044: "lxi-evntsvc", + 5046: "vpm-udp", + 5047: "iscape", + 5049: "ivocalize", + 5050: "mmcc", + 5051: "ita-agent", + 5052: "ita-manager", + 5053: "rlm-disc", + 5055: "unot", + 5056: "intecom-ps1", + 5057: "intecom-ps2", + 5058: "locus-disc", + 5059: "sds", + 5060: "sip", + 5061: "sips", + 5062: "na-localise", + 5064: "ca-1", + 5065: "ca-2", + 5066: "stanag-5066", + 5067: "authentx", + 5069: "i-net-2000-npr", + 5070: "vtsas", + 5071: "powerschool", + 5072: "ayiya", + 5073: "tag-pm", + 5074: "alesquery", + 5078: "pixelpusher", + 5079: "cp-spxrpts", + 5080: "onscreen", + 5081: "sdl-ets", + 5082: "qcp", + 5083: "qfp", + 5084: "llrp", + 5085: "encrypted-llrp", + 5092: "magpie", + 5093: "sentinel-lm", + 5094: "hart-ip", + 5099: "sentlm-srv2srv", + 5100: "socalia", + 5101: "talarian-udp", + 5102: "oms-nonsecure", + 5104: "tinymessage", + 5105: "hughes-ap", + 5111: "taep-as-svc", + 5112: "pm-cmdsvr", + 5116: "emb-proj-cmd", + 5120: "barracuda-bbs", + 5133: "nbt-pc", + 5136: "minotaur-sa", + 5137: "ctsd", + 5145: "rmonitor-secure", + 5150: "atmp", + 5151: "esri-sde", + 5152: "sde-discovery", + 5154: "bzflag", + 5155: "asctrl-agent", + 5164: "vpa-disc", + 5165: "ife-icorp", + 5166: "winpcs", + 5167: "scte104", + 5168: "scte30", + 5190: "aol", + 5191: "aol-1", + 5192: "aol-2", + 5193: "aol-3", + 5200: "targus-getdata", + 5201: "targus-getdata1", + 5202: "targus-getdata2", + 5203: "targus-getdata3", + 5223: "hpvirtgrp", + 5224: "hpvirtctrl", + 5225: "hp-server", + 5226: "hp-status", + 5227: "perfd", + 5234: "eenet", + 5235: "galaxy-network", + 5236: "padl2sim", + 5237: "mnet-discovery", + 5245: "downtools-disc", + 5246: "capwap-control", + 5247: "capwap-data", + 5248: "caacws", + 5249: "caaclang2", + 5250: "soagateway", + 5251: "caevms", + 5252: "movaz-ssc", + 5264: "3com-njack-1", + 5265: "3com-njack-2", + 5270: "cartographerxmp", + 5271: "cuelink-disc", + 5272: "pk", + 5282: "transmit-port", + 5298: "presence", + 5299: "nlg-data", + 5300: "hacl-hb", + 5301: "hacl-gs", + 5302: "hacl-cfg", + 5303: "hacl-probe", + 5304: "hacl-local", + 5305: "hacl-test", + 5306: "sun-mc-grp", + 5307: "sco-aip", + 5308: "cfengine", + 5309: "jprinter", + 5310: "outlaws", + 5312: "permabit-cs", + 5313: "rrdp", + 5314: "opalis-rbt-ipc", + 5315: "hacl-poll", + 5343: "kfserver", + 5344: "xkotodrcp", + 5349: "stuns", + 5350: "pcp-multicast", + 5351: "pcp", + 5352: "dns-llq", + 5353: "mdns", + 5354: "mdnsresponder", + 5355: "llmnr", + 5356: "ms-smlbiz", + 5357: "wsdapi", + 5358: "wsdapi-s", + 5359: "ms-alerter", + 5360: "ms-sideshow", + 5361: "ms-s-sideshow", + 5362: "serverwsd2", + 5363: "net-projection", + 5364: "kdnet", + 5397: "stresstester", + 5398: "elektron-admin", + 5399: "securitychase", + 5400: 
"excerpt", + 5401: "excerpts", + 5402: "mftp", + 5403: "hpoms-ci-lstn", + 5404: "hpoms-dps-lstn", + 5405: "netsupport", + 5406: "systemics-sox", + 5407: "foresyte-clear", + 5408: "foresyte-sec", + 5409: "salient-dtasrv", + 5410: "salient-usrmgr", + 5411: "actnet", + 5412: "continuus", + 5413: "wwiotalk", + 5414: "statusd", + 5415: "ns-server", + 5416: "sns-gateway", + 5417: "sns-agent", + 5418: "mcntp", + 5419: "dj-ice", + 5420: "cylink-c", + 5421: "netsupport2", + 5422: "salient-mux", + 5423: "virtualuser", + 5424: "beyond-remote", + 5425: "br-channel", + 5426: "devbasic", + 5427: "sco-peer-tta", + 5428: "telaconsole", + 5429: "base", + 5430: "radec-corp", + 5431: "park-agent", + 5432: "postgresql", + 5433: "pyrrho", + 5434: "sgi-arrayd", + 5435: "sceanics", + 5436: "pmip6-cntl", + 5437: "pmip6-data", + 5443: "spss", + 5450: "tiepie-disc", + 5453: "surebox", + 5454: "apc-5454", + 5455: "apc-5455", + 5456: "apc-5456", + 5461: "silkmeter", + 5462: "ttl-publisher", + 5463: "ttlpriceproxy", + 5464: "quailnet", + 5465: "netops-broker", + 5474: "apsolab-rpc", + 5500: "fcp-addr-srvr1", + 5501: "fcp-addr-srvr2", + 5502: "fcp-srvr-inst1", + 5503: "fcp-srvr-inst2", + 5504: "fcp-cics-gw1", + 5505: "checkoutdb", + 5506: "amc", + 5553: "sgi-eventmond", + 5554: "sgi-esphttp", + 5555: "personal-agent", + 5556: "freeciv", + 5567: "dof-dps-mc-sec", + 5568: "sdt", + 5569: "rdmnet-device", + 5573: "sdmmp", + 5580: "tmosms0", + 5581: "tmosms1", + 5582: "fac-restore", + 5583: "tmo-icon-sync", + 5584: "bis-web", + 5585: "bis-sync", + 5597: "ininmessaging", + 5598: "mctfeed", + 5599: "esinstall", + 5600: "esmmanager", + 5601: "esmagent", + 5602: "a1-msc", + 5603: "a1-bs", + 5604: "a3-sdunode", + 5605: "a4-sdunode", + 5627: "ninaf", + 5628: "htrust", + 5629: "symantec-sfdb", + 5630: "precise-comm", + 5631: "pcanywheredata", + 5632: "pcanywherestat", + 5633: "beorl", + 5634: "xprtld", + 5670: "zre-disc", + 5671: "amqps", + 5672: "amqp", + 5673: "jms", + 5674: "hyperscsi-port", + 5675: "v5ua", + 5676: "raadmin", + 5677: "questdb2-lnchr", + 5678: "rrac", + 5679: "dccm", + 5680: "auriga-router", + 5681: "ncxcp", + 5682: "brightcore", + 5683: "coap", + 5684: "coaps", + 5687: "gog-multiplayer", + 5688: "ggz", + 5689: "qmvideo", + 5713: "proshareaudio", + 5714: "prosharevideo", + 5715: "prosharedata", + 5716: "prosharerequest", + 5717: "prosharenotify", + 5718: "dpm", + 5719: "dpm-agent", + 5720: "ms-licensing", + 5721: "dtpt", + 5722: "msdfsr", + 5723: "omhs", + 5724: "omsdk", + 5728: "io-dist-group", + 5729: "openmail", + 5730: "unieng", + 5741: "ida-discover1", + 5742: "ida-discover2", + 5743: "watchdoc-pod", + 5744: "watchdoc", + 5745: "fcopy-server", + 5746: "fcopys-server", + 5747: "tunatic", + 5748: "tunalyzer", + 5750: "rscd", + 5755: "openmailg", + 5757: "x500ms", + 5766: "openmailns", + 5767: "s-openmail", + 5768: "openmailpxy", + 5769: "spramsca", + 5770: "spramsd", + 5771: "netagent", + 5777: "dali-port", + 5781: "3par-evts", + 5782: "3par-mgmt", + 5783: "3par-mgmt-ssl", + 5784: "ibar", + 5785: "3par-rcopy", + 5786: "cisco-redu", + 5787: "waascluster", + 5793: "xtreamx", + 5794: "spdp", + 5813: "icmpd", + 5814: "spt-automation", + 5859: "wherehoo", + 5863: "ppsuitemsg", + 5900: "rfb", + 5910: "cm", + 5911: "cpdlc", + 5912: "fis", + 5913: "ads-c", + 5963: "indy", + 5968: "mppolicy-v5", + 5969: "mppolicy-mgr", + 5984: "couchdb", + 5985: "wsman", + 5986: "wsmans", + 5987: "wbem-rmi", + 5988: "wbem-http", + 5989: "wbem-https", + 5990: "wbem-exp-https", + 5991: "nuxsl", + 5992: "consul-insight", + 5999: "cvsup", 
+ 6064: "ndl-ahp-svc", + 6065: "winpharaoh", + 6066: "ewctsp", + 6069: "trip", + 6070: "messageasap", + 6071: "ssdtp", + 6072: "diagnose-proc", + 6073: "directplay8", + 6074: "max", + 6080: "gue", + 6081: "geneve", + 6082: "p25cai", + 6083: "miami-bcast", + 6085: "konspire2b", + 6086: "pdtp", + 6087: "ldss", + 6088: "doglms-notify", + 6100: "synchronet-db", + 6101: "synchronet-rtc", + 6102: "synchronet-upd", + 6103: "rets", + 6104: "dbdb", + 6105: "primaserver", + 6106: "mpsserver", + 6107: "etc-control", + 6108: "sercomm-scadmin", + 6109: "globecast-id", + 6110: "softcm", + 6111: "spc", + 6112: "dtspcd", + 6118: "tipc", + 6122: "bex-webadmin", + 6123: "backup-express", + 6124: "pnbs", + 6133: "nbt-wol", + 6140: "pulsonixnls", + 6141: "meta-corp", + 6142: "aspentec-lm", + 6143: "watershed-lm", + 6144: "statsci1-lm", + 6145: "statsci2-lm", + 6146: "lonewolf-lm", + 6147: "montage-lm", + 6148: "ricardo-lm", + 6149: "tal-pod", + 6160: "ecmp-data", + 6161: "patrol-ism", + 6162: "patrol-coll", + 6163: "pscribe", + 6200: "lm-x", + 6201: "thermo-calc", + 6209: "qmtps", + 6222: "radmind", + 6241: "jeol-nsddp-1", + 6242: "jeol-nsddp-2", + 6243: "jeol-nsddp-3", + 6244: "jeol-nsddp-4", + 6251: "tl1-raw-ssl", + 6252: "tl1-ssh", + 6253: "crip", + 6268: "grid", + 6269: "grid-alt", + 6300: "bmc-grx", + 6301: "bmc-ctd-ldap", + 6306: "ufmp", + 6315: "scup-disc", + 6316: "abb-escp", + 6317: "nav-data", + 6320: "repsvc", + 6321: "emp-server1", + 6322: "emp-server2", + 6324: "hrd-ns-disc", + 6343: "sflow", + 6346: "gnutella-svc", + 6347: "gnutella-rtr", + 6350: "adap", + 6355: "pmcs", + 6360: "metaedit-mu", + 6363: "ndn", + 6370: "metaedit-se", + 6382: "metatude-mds", + 6389: "clariion-evr01", + 6390: "metaedit-ws", + 6417: "faxcomservice", + 6419: "svdrp-disc", + 6420: "nim-vdrshell", + 6421: "nim-wan", + 6443: "sun-sr-https", + 6444: "sge-qmaster", + 6445: "sge-execd", + 6446: "mysql-proxy", + 6455: "skip-cert-recv", + 6456: "skip-cert-send", + 6464: "ieee11073-20701", + 6471: "lvision-lm", + 6480: "sun-sr-http", + 6481: "servicetags", + 6482: "ldoms-mgmt", + 6483: "SunVTS-RMI", + 6484: "sun-sr-jms", + 6485: "sun-sr-iiop", + 6486: "sun-sr-iiops", + 6487: "sun-sr-iiop-aut", + 6488: "sun-sr-jmx", + 6489: "sun-sr-admin", + 6500: "boks", + 6501: "boks-servc", + 6502: "boks-servm", + 6503: "boks-clntd", + 6505: "badm-priv", + 6506: "badm-pub", + 6507: "bdir-priv", + 6508: "bdir-pub", + 6509: "mgcs-mfp-port", + 6510: "mcer-port", + 6511: "dccp-udp", + 6514: "syslog-tls", + 6515: "elipse-rec", + 6543: "lds-distrib", + 6544: "lds-dump", + 6547: "apc-6547", + 6548: "apc-6548", + 6549: "apc-6549", + 6550: "fg-sysupdate", + 6551: "sum", + 6558: "xdsxdm", + 6566: "sane-port", + 6568: "rp-reputation", + 6579: "affiliate", + 6580: "parsec-master", + 6581: "parsec-peer", + 6582: "parsec-game", + 6583: "joaJewelSuite", + 6619: "odette-ftps", + 6620: "kftp-data", + 6621: "kftp", + 6622: "mcftp", + 6623: "ktelnet", + 6626: "wago-service", + 6627: "nexgen", + 6628: "afesc-mc", + 6629: "nexgen-aux", + 6633: "cisco-vpath-tun", + 6634: "mpls-pm", + 6635: "mpls-udp", + 6636: "mpls-udp-dtls", + 6653: "openflow", + 6657: "palcom-disc", + 6670: "vocaltec-gold", + 6671: "p4p-portal", + 6672: "vision-server", + 6673: "vision-elmd", + 6678: "vfbp-disc", + 6679: "osaut", + 6689: "tsa", + 6696: "babel", + 6701: "kti-icad-srvr", + 6702: "e-design-net", + 6703: "e-design-web", + 6714: "ibprotocol", + 6715: "fibotrader-com", + 6767: "bmc-perf-agent", + 6768: "bmc-perf-mgrd", + 6769: "adi-gxp-srvprt", + 6770: "plysrv-http", + 6771: 
"plysrv-https", + 6784: "bfd-lag", + 6785: "dgpf-exchg", + 6786: "smc-jmx", + 6787: "smc-admin", + 6788: "smc-http", + 6790: "hnmp", + 6791: "hnm", + 6801: "acnet", + 6831: "ambit-lm", + 6841: "netmo-default", + 6842: "netmo-http", + 6850: "iccrushmore", + 6868: "acctopus-st", + 6888: "muse", + 6935: "ethoscan", + 6936: "xsmsvc", + 6946: "bioserver", + 6951: "otlp", + 6961: "jmact3", + 6962: "jmevt2", + 6963: "swismgr1", + 6964: "swismgr2", + 6965: "swistrap", + 6966: "swispol", + 6969: "acmsoda", + 6997: "MobilitySrv", + 6998: "iatp-highpri", + 6999: "iatp-normalpri", + 7000: "afs3-fileserver", + 7001: "afs3-callback", + 7002: "afs3-prserver", + 7003: "afs3-vlserver", + 7004: "afs3-kaserver", + 7005: "afs3-volser", + 7006: "afs3-errors", + 7007: "afs3-bos", + 7008: "afs3-update", + 7009: "afs3-rmtsys", + 7010: "ups-onlinet", + 7011: "talon-disc", + 7012: "talon-engine", + 7013: "microtalon-dis", + 7014: "microtalon-com", + 7015: "talon-webserver", + 7016: "spg", + 7017: "grasp", + 7019: "doceri-view", + 7020: "dpserve", + 7021: "dpserveadmin", + 7022: "ctdp", + 7023: "ct2nmcs", + 7024: "vmsvc", + 7025: "vmsvc-2", + 7030: "op-probe", + 7040: "quest-disc", + 7070: "arcp", + 7071: "iwg1", + 7080: "empowerid", + 7088: "zixi-transport", + 7095: "jdp-disc", + 7099: "lazy-ptop", + 7100: "font-service", + 7101: "elcn", + 7107: "aes-x170", + 7121: "virprot-lm", + 7128: "scenidm", + 7129: "scenccs", + 7161: "cabsm-comm", + 7162: "caistoragemgr", + 7163: "cacsambroker", + 7164: "fsr", + 7165: "doc-server", + 7166: "aruba-server", + 7169: "ccag-pib", + 7170: "nsrp", + 7171: "drm-production", + 7174: "clutild", + 7181: "janus-disc", + 7200: "fodms", + 7201: "dlip", + 7227: "ramp", + 7235: "aspcoordination", + 7244: "frc-hicp-disc", + 7262: "cnap", + 7272: "watchme-7272", + 7273: "oma-rlp", + 7274: "oma-rlp-s", + 7275: "oma-ulp", + 7276: "oma-ilp", + 7277: "oma-ilp-s", + 7278: "oma-dcdocbs", + 7279: "ctxlic", + 7280: "itactionserver1", + 7281: "itactionserver2", + 7282: "mzca-alert", + 7365: "lcm-server", + 7391: "mindfilesys", + 7392: "mrssrendezvous", + 7393: "nfoldman", + 7394: "fse", + 7395: "winqedit", + 7397: "hexarc", + 7400: "rtps-discovery", + 7401: "rtps-dd-ut", + 7402: "rtps-dd-mt", + 7410: "ionixnetmon", + 7411: "daqstream", + 7421: "mtportmon", + 7426: "pmdmgr", + 7427: "oveadmgr", + 7428: "ovladmgr", + 7429: "opi-sock", + 7430: "xmpv7", + 7431: "pmd", + 7437: "faximum", + 7443: "oracleas-https", + 7473: "rise", + 7491: "telops-lmd", + 7500: "silhouette", + 7501: "ovbus", + 7510: "ovhpas", + 7511: "pafec-lm", + 7542: "saratoga", + 7543: "atul", + 7544: "nta-ds", + 7545: "nta-us", + 7546: "cfs", + 7547: "cwmp", + 7548: "tidp", + 7549: "nls-tl", + 7550: "cloudsignaling", + 7560: "sncp", + 7566: "vsi-omega", + 7570: "aries-kfinder", + 7574: "coherence-disc", + 7588: "sun-lm", + 7606: "mipi-debug", + 7624: "indi", + 7627: "soap-http", + 7628: "zen-pawn", + 7629: "xdas", + 7633: "pmdfmgt", + 7648: "cuseeme", + 7674: "imqtunnels", + 7675: "imqtunnel", + 7676: "imqbrokerd", + 7677: "sun-user-https", + 7680: "pando-pub", + 7689: "collaber", + 7697: "klio", + 7707: "sync-em7", + 7708: "scinet", + 7720: "medimageportal", + 7724: "nsdeepfreezectl", + 7725: "nitrogen", + 7726: "freezexservice", + 7727: "trident-data", + 7728: "osvr", + 7734: "smip", + 7738: "aiagent", + 7741: "scriptview", + 7743: "sstp-1", + 7744: "raqmon-pdu", + 7747: "prgp", + 7777: "cbt", + 7778: "interwise", + 7779: "vstat", + 7781: "accu-lmgr", + 7784: "s-bfd", + 7786: "minivend", + 7787: "popup-reminders", + 7789: 
"office-tools", + 7794: "q3ade", + 7797: "pnet-conn", + 7798: "pnet-enc", + 7799: "altbsdp", + 7800: "asr", + 7801: "ssp-client", + 7802: "vns-tp", + 7810: "rbt-wanopt", + 7845: "apc-7845", + 7846: "apc-7846", + 7872: "mipv6tls", + 7880: "pss", + 7887: "ubroker", + 7900: "mevent", + 7901: "tnos-sp", + 7902: "tnos-dp", + 7903: "tnos-dps", + 7913: "qo-secure", + 7932: "t2-drm", + 7933: "t2-brm", + 7962: "generalsync", + 7967: "supercell", + 7979: "micromuse-ncps", + 7980: "quest-vista", + 7982: "sossd-disc", + 7998: "usicontentpush", + 7999: "irdmi2", + 8000: "irdmi", + 8001: "vcom-tunnel", + 8002: "teradataordbms", + 8003: "mcreport", + 8005: "mxi", + 8006: "wpl-disc", + 8007: "warppipe", + 8008: "http-alt", + 8019: "qbdb", + 8020: "intu-ec-svcdisc", + 8021: "intu-ec-client", + 8022: "oa-system", + 8025: "ca-audit-da", + 8026: "ca-audit-ds", + 8032: "pro-ed", + 8033: "mindprint", + 8034: "vantronix-mgmt", + 8040: "ampify", + 8041: "enguity-xccetp", + 8052: "senomix01", + 8053: "senomix02", + 8054: "senomix03", + 8055: "senomix04", + 8056: "senomix05", + 8057: "senomix06", + 8058: "senomix07", + 8059: "senomix08", + 8060: "aero", + 8074: "gadugadu", + 8080: "http-alt", + 8081: "sunproxyadmin", + 8082: "us-cli", + 8083: "us-srv", + 8086: "d-s-n", + 8087: "simplifymedia", + 8088: "radan-http", + 8097: "sac", + 8100: "xprint-server", + 8115: "mtl8000-matrix", + 8116: "cp-cluster", + 8118: "privoxy", + 8121: "apollo-data", + 8122: "apollo-admin", + 8128: "paycash-online", + 8129: "paycash-wbp", + 8130: "indigo-vrmi", + 8131: "indigo-vbcp", + 8132: "dbabble", + 8148: "isdd", + 8149: "eor-game", + 8160: "patrol", + 8161: "patrol-snmp", + 8182: "vmware-fdm", + 8184: "itach", + 8192: "spytechphone", + 8194: "blp1", + 8195: "blp2", + 8199: "vvr-data", + 8200: "trivnet1", + 8201: "trivnet2", + 8202: "aesop", + 8204: "lm-perfworks", + 8205: "lm-instmgr", + 8206: "lm-dta", + 8207: "lm-sserver", + 8208: "lm-webwatcher", + 8230: "rexecj", + 8231: "hncp-udp-port", + 8232: "hncp-dtls-port", + 8243: "synapse-nhttps", + 8276: "pando-sec", + 8280: "synapse-nhttp", + 8282: "libelle-disc", + 8292: "blp3", + 8294: "blp4", + 8300: "tmi", + 8301: "amberon", + 8320: "tnp-discover", + 8321: "tnp", + 8322: "garmin-marine", + 8351: "server-find", + 8376: "cruise-enum", + 8377: "cruise-swroute", + 8378: "cruise-config", + 8379: "cruise-diags", + 8380: "cruise-update", + 8383: "m2mservices", + 8384: "marathontp", + 8400: "cvd", + 8401: "sabarsd", + 8402: "abarsd", + 8403: "admind", + 8416: "espeech", + 8417: "espeech-rtp", + 8442: "cybro-a-bus", + 8443: "pcsync-https", + 8444: "pcsync-http", + 8445: "copy-disc", + 8450: "npmp", + 8472: "otv", + 8473: "vp2p", + 8474: "noteshare", + 8500: "fmtp", + 8501: "cmtp-av", + 8503: "lsp-self-ping", + 8554: "rtsp-alt", + 8555: "d-fence", + 8567: "dof-tunnel", + 8600: "asterix", + 8609: "canon-cpp-disc", + 8610: "canon-mfnp", + 8611: "canon-bjnp1", + 8612: "canon-bjnp2", + 8613: "canon-bjnp3", + 8614: "canon-bjnp4", + 8675: "msi-cps-rm-disc", + 8686: "sun-as-jmxrmi", + 8732: "dtp-net", + 8733: "ibus", + 8763: "mc-appserver", + 8764: "openqueue", + 8765: "ultraseek-http", + 8766: "amcs", + 8770: "dpap", + 8786: "msgclnt", + 8787: "msgsrvr", + 8793: "acd-pm", + 8800: "sunwebadmin", + 8804: "truecm", + 8805: "pfcp", + 8808: "ssports-bcast", + 8873: "dxspider", + 8880: "cddbp-alt", + 8883: "secure-mqtt", + 8888: "ddi-udp-1", + 8889: "ddi-udp-2", + 8890: "ddi-udp-3", + 8891: "ddi-udp-4", + 8892: "ddi-udp-5", + 8893: "ddi-udp-6", + 8894: "ddi-udp-7", + 8899: "ospf-lite", + 8900: 
"jmb-cds1", + 8901: "jmb-cds2", + 8910: "manyone-http", + 8911: "manyone-xml", + 8912: "wcbackup", + 8913: "dragonfly", + 8954: "cumulus-admin", + 8980: "nod-provider", + 8981: "nod-client", + 8989: "sunwebadmins", + 8990: "http-wmap", + 8991: "https-wmap", + 8999: "bctp", + 9000: "cslistener", + 9001: "etlservicemgr", + 9002: "dynamid", + 9007: "ogs-client", + 9009: "pichat", + 9020: "tambora", + 9021: "panagolin-ident", + 9022: "paragent", + 9023: "swa-1", + 9024: "swa-2", + 9025: "swa-3", + 9026: "swa-4", + 9060: "CardWeb-RT", + 9080: "glrpc", + 9084: "aurora", + 9085: "ibm-rsyscon", + 9086: "net2display", + 9087: "classic", + 9088: "sqlexec", + 9089: "sqlexec-ssl", + 9090: "websm", + 9091: "xmltec-xmlmail", + 9092: "XmlIpcRegSvc", + 9100: "hp-pdl-datastr", + 9101: "bacula-dir", + 9102: "bacula-fd", + 9103: "bacula-sd", + 9104: "peerwire", + 9105: "xadmin", + 9106: "astergate-disc", + 9119: "mxit", + 9131: "dddp", + 9160: "apani1", + 9161: "apani2", + 9162: "apani3", + 9163: "apani4", + 9164: "apani5", + 9191: "sun-as-jpda", + 9200: "wap-wsp", + 9201: "wap-wsp-wtp", + 9202: "wap-wsp-s", + 9203: "wap-wsp-wtp-s", + 9204: "wap-vcard", + 9205: "wap-vcal", + 9206: "wap-vcard-s", + 9207: "wap-vcal-s", + 9208: "rjcdb-vcards", + 9209: "almobile-system", + 9210: "oma-mlp", + 9211: "oma-mlp-s", + 9212: "serverviewdbms", + 9213: "serverstart", + 9214: "ipdcesgbs", + 9215: "insis", + 9216: "acme", + 9217: "fsc-port", + 9222: "teamcoherence", + 9255: "mon", + 9277: "traingpsdata", + 9278: "pegasus", + 9279: "pegasus-ctl", + 9280: "pgps", + 9281: "swtp-port1", + 9282: "swtp-port2", + 9283: "callwaveiam", + 9284: "visd", + 9285: "n2h2server", + 9286: "n2receive", + 9287: "cumulus", + 9292: "armtechdaemon", + 9293: "storview", + 9294: "armcenterhttp", + 9295: "armcenterhttps", + 9300: "vrace", + 9318: "secure-ts", + 9321: "guibase", + 9343: "mpidcmgr", + 9344: "mphlpdmc", + 9346: "ctechlicensing", + 9374: "fjdmimgr", + 9380: "boxp", + 9396: "fjinvmgr", + 9397: "mpidcagt", + 9400: "sec-t4net-srv", + 9401: "sec-t4net-clt", + 9402: "sec-pc2fax-srv", + 9418: "git", + 9443: "tungsten-https", + 9444: "wso2esb-console", + 9450: "sntlkeyssrvr", + 9500: "ismserver", + 9522: "sma-spw", + 9535: "mngsuite", + 9536: "laes-bf", + 9555: "trispen-sra", + 9592: "ldgateway", + 9593: "cba8", + 9594: "msgsys", + 9595: "pds", + 9596: "mercury-disc", + 9597: "pd-admin", + 9598: "vscp", + 9599: "robix", + 9600: "micromuse-ncpw", + 9612: "streamcomm-ds", + 9618: "condor", + 9628: "odbcpathway", + 9629: "uniport", + 9632: "mc-comm", + 9667: "xmms2", + 9668: "tec5-sdctp", + 9694: "client-wakeup", + 9695: "ccnx", + 9700: "board-roar", + 9747: "l5nas-parchan", + 9750: "board-voip", + 9753: "rasadv", + 9762: "tungsten-http", + 9800: "davsrc", + 9801: "sstp-2", + 9802: "davsrcs", + 9875: "sapv1", + 9878: "kca-service", + 9888: "cyborg-systems", + 9889: "gt-proxy", + 9898: "monkeycom", + 9899: "sctp-tunneling", + 9900: "iua", + 9901: "enrp", + 9903: "multicast-ping", + 9909: "domaintime", + 9911: "sype-transport", + 9950: "apc-9950", + 9951: "apc-9951", + 9952: "apc-9952", + 9953: "acis", + 9955: "alljoyn-mcm", + 9956: "alljoyn", + 9966: "odnsp", + 9987: "dsm-scm-target", + 9990: "osm-appsrvr", + 9991: "osm-oev", + 9992: "palace-1", + 9993: "palace-2", + 9994: "palace-3", + 9995: "palace-4", + 9996: "palace-5", + 9997: "palace-6", + 9998: "distinct32", + 9999: "distinct", + 10000: "ndmp", + 10001: "scp-config", + 10002: "documentum", + 10003: "documentum-s", + 10007: "mvs-capacity", + 10008: "octopus", + 10009: "swdtp-sv", + 10050: 
"zabbix-agent", + 10051: "zabbix-trapper", + 10080: "amanda", + 10081: "famdc", + 10100: "itap-ddtp", + 10101: "ezmeeting-2", + 10102: "ezproxy-2", + 10103: "ezrelay", + 10104: "swdtp", + 10107: "bctp-server", + 10110: "nmea-0183", + 10111: "nmea-onenet", + 10113: "netiq-endpoint", + 10114: "netiq-qcheck", + 10115: "netiq-endpt", + 10116: "netiq-voipa", + 10117: "iqrm", + 10128: "bmc-perf-sd", + 10160: "qb-db-server", + 10161: "snmpdtls", + 10162: "snmpdtls-trap", + 10200: "trisoap", + 10201: "rscs", + 10252: "apollo-relay", + 10253: "eapol-relay", + 10260: "axis-wimp-port", + 10288: "blocks", + 10439: "bngsync", + 10500: "hip-nat-t", + 10540: "MOS-lower", + 10541: "MOS-upper", + 10542: "MOS-aux", + 10543: "MOS-soap", + 10544: "MOS-soap-opt", + 10800: "gap", + 10805: "lpdg", + 10810: "nmc-disc", + 10860: "helix", + 10880: "bveapi", + 10990: "rmiaux", + 11000: "irisa", + 11001: "metasys", + 10023: "cefd-vmp", + 11095: "weave", + 11106: "sgi-lk", + 11108: "myq-termlink", + 11111: "vce", + 11112: "dicom", + 11161: "suncacao-snmp", + 11162: "suncacao-jmxmp", + 11163: "suncacao-rmi", + 11164: "suncacao-csa", + 11165: "suncacao-websvc", + 11171: "snss", + 11201: "smsqp", + 11208: "wifree", + 11211: "memcache", + 11319: "imip", + 11320: "imip-channels", + 11321: "arena-server", + 11367: "atm-uhas", + 11371: "hkp", + 11430: "lsdp", + 11600: "tempest-port", + 11720: "h323callsigalt", + 11723: "emc-xsw-dcache", + 11751: "intrepid-ssl", + 11796: "lanschool-mpt", + 11876: "xoraya", + 11877: "x2e-disc", + 11967: "sysinfo-sp", + 12000: "entextxid", + 12001: "entextnetwk", + 12002: "entexthigh", + 12003: "entextmed", + 12004: "entextlow", + 12005: "dbisamserver1", + 12006: "dbisamserver2", + 12007: "accuracer", + 12008: "accuracer-dbms", + 12009: "ghvpn", + 12012: "vipera", + 12013: "vipera-ssl", + 12109: "rets-ssl", + 12121: "nupaper-ss", + 12168: "cawas", + 12172: "hivep", + 12300: "linogridengine", + 12321: "warehouse-sss", + 12322: "warehouse", + 12345: "italk", + 12753: "tsaf", + 13160: "i-zipqd", + 13216: "bcslogc", + 13217: "rs-pias", + 13218: "emc-vcas-udp", + 13223: "powwow-client", + 13224: "powwow-server", + 13400: "doip-disc", + 13720: "bprd", + 13721: "bpdbm", + 13722: "bpjava-msvc", + 13724: "vnetd", + 13782: "bpcd", + 13783: "vopied", + 13785: "nbdb", + 13786: "nomdb", + 13818: "dsmcc-config", + 13819: "dsmcc-session", + 13820: "dsmcc-passthru", + 13821: "dsmcc-download", + 13822: "dsmcc-ccp", + 13894: "ucontrol", + 13929: "dta-systems", + 14000: "scotty-ft", + 14001: "sua", + 14002: "scotty-disc", + 14033: "sage-best-com1", + 14034: "sage-best-com2", + 14141: "vcs-app", + 14142: "icpp", + 14145: "gcm-app", + 14149: "vrts-tdd", + 14154: "vad", + 14250: "cps", + 14414: "ca-web-update", + 14936: "hde-lcesrvr-1", + 14937: "hde-lcesrvr-2", + 15000: "hydap", + 15118: "v2g-secc", + 15345: "xpilot", + 15363: "3link", + 15555: "cisco-snat", + 15660: "bex-xr", + 15740: "ptp", + 15998: "2ping", + 16003: "alfin", + 16161: "sun-sea-port", + 16309: "etb4j", + 16310: "pduncs", + 16311: "pdefmns", + 16360: "netserialext1", + 16361: "netserialext2", + 16367: "netserialext3", + 16368: "netserialext4", + 16384: "connected", + 16666: "vtp", + 16900: "newbay-snc-mc", + 16950: "sgcip", + 16991: "intel-rci-mp", + 16992: "amt-soap-http", + 16993: "amt-soap-https", + 16994: "amt-redir-tcp", + 16995: "amt-redir-tls", + 17007: "isode-dua", + 17185: "soundsvirtual", + 17219: "chipper", + 17220: "avtp", + 17221: "avdecc", + 17222: "cpsp", + 17224: "trdp-pd", + 17225: "trdp-md", + 17234: "integrius-stp", + 17235: 
"ssh-mgmt", + 17500: "db-lsp-disc", + 17729: "ea", + 17754: "zep", + 17755: "zigbee-ip", + 17756: "zigbee-ips", + 18000: "biimenu", + 18181: "opsec-cvp", + 18182: "opsec-ufp", + 18183: "opsec-sam", + 18184: "opsec-lea", + 18185: "opsec-omi", + 18186: "ohsc", + 18187: "opsec-ela", + 18241: "checkpoint-rtm", + 18262: "gv-pf", + 18463: "ac-cluster", + 18634: "rds-ib", + 18635: "rds-ip", + 18668: "vdmmesh-disc", + 18769: "ique", + 18881: "infotos", + 18888: "apc-necmp", + 19000: "igrid", + 19007: "scintilla", + 19191: "opsec-uaa", + 19194: "ua-secureagent", + 19220: "cora-disc", + 19283: "keysrvr", + 19315: "keyshadow", + 19398: "mtrgtrans", + 19410: "hp-sco", + 19411: "hp-sca", + 19412: "hp-sessmon", + 19539: "fxuptp", + 19540: "sxuptp", + 19541: "jcp", + 19788: "mle", + 19999: "dnp-sec", + 20000: "dnp", + 20001: "microsan", + 20002: "commtact-http", + 20003: "commtact-https", + 20005: "openwebnet", + 20012: "ss-idi-disc", + 20014: "opendeploy", + 20034: "nburn-id", + 20046: "tmophl7mts", + 20048: "mountd", + 20049: "nfsrdma", + 20167: "tolfab", + 20202: "ipdtp-port", + 20222: "ipulse-ics", + 20480: "emwavemsg", + 20670: "track", + 20999: "athand-mmp", + 21000: "irtrans", + 21554: "dfserver", + 21590: "vofr-gateway", + 21800: "tvpm", + 21845: "webphone", + 21846: "netspeak-is", + 21847: "netspeak-cs", + 21848: "netspeak-acd", + 21849: "netspeak-cps", + 22000: "snapenetio", + 22001: "optocontrol", + 22002: "optohost002", + 22003: "optohost003", + 22004: "optohost004", + 22005: "optohost004", + 22273: "wnn6", + 22305: "cis", + 22335: "shrewd-stream", + 22343: "cis-secure", + 22347: "wibukey", + 22350: "codemeter", + 22555: "vocaltec-phone", + 22763: "talikaserver", + 22800: "aws-brf", + 22951: "brf-gw", + 23000: "inovaport1", + 23001: "inovaport2", + 23002: "inovaport3", + 23003: "inovaport4", + 23004: "inovaport5", + 23005: "inovaport6", + 23272: "s102", + 23294: "5afe-disc", + 23333: "elxmgmt", + 23400: "novar-dbase", + 23401: "novar-alarm", + 23402: "novar-global", + 24000: "med-ltp", + 24001: "med-fsp-rx", + 24002: "med-fsp-tx", + 24003: "med-supp", + 24004: "med-ovw", + 24005: "med-ci", + 24006: "med-net-svc", + 24242: "filesphere", + 24249: "vista-4gl", + 24321: "ild", + 24322: "hid", + 24386: "intel-rci", + 24465: "tonidods", + 24554: "binkp", + 24577: "bilobit-update", + 24676: "canditv", + 24677: "flashfiler", + 24678: "proactivate", + 24680: "tcc-http", + 24850: "assoc-disc", + 24922: "find", + 25000: "icl-twobase1", + 25001: "icl-twobase2", + 25002: "icl-twobase3", + 25003: "icl-twobase4", + 25004: "icl-twobase5", + 25005: "icl-twobase6", + 25006: "icl-twobase7", + 25007: "icl-twobase8", + 25008: "icl-twobase9", + 25009: "icl-twobase10", + 25793: "vocaltec-hos", + 25900: "tasp-net", + 25901: "niobserver", + 25902: "nilinkanalyst", + 25903: "niprobe", + 25954: "bf-game", + 25955: "bf-master", + 26000: "quake", + 26133: "scscp", + 26208: "wnn6-ds", + 26260: "ezproxy", + 26261: "ezmeeting", + 26262: "k3software-svr", + 26263: "k3software-cli", + 26486: "exoline-udp", + 26487: "exoconfig", + 26489: "exonet", + 27345: "imagepump", + 27442: "jesmsjc", + 27504: "kopek-httphead", + 27782: "ars-vista", + 27999: "tw-auth-key", + 28000: "nxlmd", + 28119: "a27-ran-ran", + 28200: "voxelstorm", + 28240: "siemensgsm", + 29167: "otmp", + 30001: "pago-services1", + 30002: "pago-services2", + 30003: "amicon-fpsu-ra", + 30004: "amicon-fpsu-s", + 30260: "kingdomsonline", + 30832: "samsung-disc", + 30999: "ovobs", + 31016: "ka-kdp", + 31029: "yawn", + 31416: "xqosd", + 31457: "tetrinet", + 31620: 
"lm-mon", + 31765: "gamesmith-port", + 31948: "iceedcp-tx", + 31949: "iceedcp-rx", + 32034: "iracinghelper", + 32249: "t1distproc60", + 32483: "apm-link", + 32635: "sec-ntb-clnt", + 32636: "DMExpress", + 32767: "filenet-powsrm", + 32768: "filenet-tms", + 32769: "filenet-rpc", + 32770: "filenet-nch", + 32771: "filenet-rmi", + 32772: "filenet-pa", + 32773: "filenet-cm", + 32774: "filenet-re", + 32775: "filenet-pch", + 32776: "filenet-peior", + 32777: "filenet-obrok", + 32801: "mlsn", + 32896: "idmgratm", + 33123: "aurora-balaena", + 33331: "diamondport", + 33334: "speedtrace-disc", + 33434: "traceroute", + 33656: "snip-slave", + 34249: "turbonote-2", + 34378: "p-net-local", + 34379: "p-net-remote", + 34567: "edi_service", + 34962: "profinet-rt", + 34963: "profinet-rtm", + 34964: "profinet-cm", + 34980: "ethercat", + 35001: "rt-viewer", + 35004: "rt-classmanager", + 35100: "axio-disc", + 35355: "altova-lm-disc", + 36001: "allpeers", + 36411: "wlcp", + 36865: "kastenxpipe", + 37475: "neckar", + 37654: "unisys-eportal", + 38002: "crescoctrl-disc", + 38201: "galaxy7-data", + 38202: "fairview", + 38203: "agpolicy", + 39681: "turbonote-1", + 40000: "safetynetp", + 40023: "k-patentssensor", + 40841: "cscp", + 40842: "csccredir", + 40843: "csccfirewall", + 40853: "ortec-disc", + 41111: "fs-qos", + 41230: "z-wave-s", + 41794: "crestron-cip", + 41795: "crestron-ctp", + 42508: "candp", + 42509: "candrp", + 42510: "caerpc", + 43000: "recvr-rc-disc", + 43188: "reachout", + 43189: "ndm-agent-port", + 43190: "ip-provision", + 43210: "shaperai-disc", + 43439: "eq3-config", + 43440: "ew-disc-cmd", + 43441: "ciscocsdb", + 44321: "pmcd", + 44322: "pmcdproxy", + 44544: "domiq", + 44553: "rbr-debug", + 44600: "asihpi", + 44818: "EtherNet-IP-2", + 44900: "m3da-disc", + 45000: "asmp-mon", + 45054: "invision-ag", + 45514: "cloudcheck-ping", + 45678: "eba", + 45825: "qdb2service", + 45966: "ssr-servermgr", + 46999: "mediabox", + 47000: "mbus", + 47100: "jvl-mactalk", + 47557: "dbbrowse", + 47624: "directplaysrvr", + 47806: "ap", + 47808: "bacnet", + 47809: "presonus-ucnet", + 48000: "nimcontroller", + 48001: "nimspooler", + 48002: "nimhub", + 48003: "nimgtw", + 48128: "isnetserv", + 48129: "blp5", + 48556: "com-bardac-dw", + 48619: "iqobject", + 48653: "robotraconteur", + 49001: "nusdp-disc", +} +var sctpPortNames = map[SCTPPort]string{ + 9: "discard", + 20: "ftp-data", + 21: "ftp", + 22: "ssh", + 80: "http", + 179: "bgp", + 443: "https", + 1021: "exp1", + 1022: "exp2", + 1167: "cisco-ipsla", + 1720: "h323hostcall", + 2049: "nfs", + 2225: "rcip-itu", + 2904: "m2ua", + 2905: "m3ua", + 2944: "megaco-h248", + 2945: "h248-binary", + 3097: "itu-bicc-stc", + 3565: "m2pa", + 3863: "asap-sctp", + 3864: "asap-sctp-tls", + 3868: "diameter", + 4333: "ahsp", + 4502: "a25-fap-fgw", + 4711: "trinity-dist", + 4739: "ipfix", + 4740: "ipfixs", + 5060: "sip", + 5061: "sips", + 5090: "car", + 5091: "cxtp", + 5215: "noteza", + 5445: "smbdirect", + 5672: "amqp", + 5675: "v5ua", + 5868: "diameters", + 5910: "cm", + 5911: "cpdlc", + 5912: "fis", + 5913: "ads-c", + 6704: "frc-hp", + 6705: "frc-mp", + 6706: "frc-lp", + 6970: "conductor-mpx", + 7626: "simco", + 7701: "nfapi", + 7728: "osvr", + 8471: "pim-port", + 9082: "lcs-ap", + 9084: "aurora", + 9900: "iua", + 9901: "enrp-sctp", + 9902: "enrp-sctp-tls", + 11997: "wmereceiving", + 11998: "wmedistribution", + 11999: "wmereporting", + 14001: "sua", + 20049: "nfsrdma", + 25471: "rna", + 29118: "sgsap", + 29168: "sbcap", + 29169: "iuhsctpassoc", + 30100: "rwp", + 36412: "s1-control", + 36422: 
"x2-control", + 36423: "slmap", + 36424: "nq-ap", + 36443: "m2ap", + 36444: "m3ap", + 36462: "xw-control", + 38412: "ng-control", + 38422: "xn-control", + 38472: "f1-control", +} diff --git a/vendor/github.com/google/gopacket/layers/icmp4.go b/vendor/github.com/google/gopacket/layers/icmp4.go new file mode 100644 index 0000000000..bd3f03f00c --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/icmp4.go @@ -0,0 +1,267 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// Copyright 2009-2011 Andreas Krennmair. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "errors" + "fmt" + "reflect" + + "github.com/google/gopacket" +) + +const ( + ICMPv4TypeEchoReply = 0 + ICMPv4TypeDestinationUnreachable = 3 + ICMPv4TypeSourceQuench = 4 + ICMPv4TypeRedirect = 5 + ICMPv4TypeEchoRequest = 8 + ICMPv4TypeRouterAdvertisement = 9 + ICMPv4TypeRouterSolicitation = 10 + ICMPv4TypeTimeExceeded = 11 + ICMPv4TypeParameterProblem = 12 + ICMPv4TypeTimestampRequest = 13 + ICMPv4TypeTimestampReply = 14 + ICMPv4TypeInfoRequest = 15 + ICMPv4TypeInfoReply = 16 + ICMPv4TypeAddressMaskRequest = 17 + ICMPv4TypeAddressMaskReply = 18 +) + +const ( + // DestinationUnreachable + ICMPv4CodeNet = 0 + ICMPv4CodeHost = 1 + ICMPv4CodeProtocol = 2 + ICMPv4CodePort = 3 + ICMPv4CodeFragmentationNeeded = 4 + ICMPv4CodeSourceRoutingFailed = 5 + ICMPv4CodeNetUnknown = 6 + ICMPv4CodeHostUnknown = 7 + ICMPv4CodeSourceIsolated = 8 + ICMPv4CodeNetAdminProhibited = 9 + ICMPv4CodeHostAdminProhibited = 10 + ICMPv4CodeNetTOS = 11 + ICMPv4CodeHostTOS = 12 + ICMPv4CodeCommAdminProhibited = 13 + ICMPv4CodeHostPrecedence = 14 + ICMPv4CodePrecedenceCutoff = 15 + + // TimeExceeded + ICMPv4CodeTTLExceeded = 0 + ICMPv4CodeFragmentReassemblyTimeExceeded = 1 + + // ParameterProblem + ICMPv4CodePointerIndicatesError = 0 + ICMPv4CodeMissingOption = 1 + ICMPv4CodeBadLength = 2 + + // Redirect + // ICMPv4CodeNet = same as for DestinationUnreachable + // ICMPv4CodeHost = same as for DestinationUnreachable + ICMPv4CodeTOSNet = 2 + ICMPv4CodeTOSHost = 3 +) + +type icmpv4TypeCodeInfoStruct struct { + typeStr string + codeStr *map[uint8]string +} + +var ( + icmpv4TypeCodeInfo = map[uint8]icmpv4TypeCodeInfoStruct{ + ICMPv4TypeDestinationUnreachable: icmpv4TypeCodeInfoStruct{ + "DestinationUnreachable", &map[uint8]string{ + ICMPv4CodeNet: "Net", + ICMPv4CodeHost: "Host", + ICMPv4CodeProtocol: "Protocol", + ICMPv4CodePort: "Port", + ICMPv4CodeFragmentationNeeded: "FragmentationNeeded", + ICMPv4CodeSourceRoutingFailed: "SourceRoutingFailed", + ICMPv4CodeNetUnknown: "NetUnknown", + ICMPv4CodeHostUnknown: "HostUnknown", + ICMPv4CodeSourceIsolated: "SourceIsolated", + ICMPv4CodeNetAdminProhibited: "NetAdminProhibited", + ICMPv4CodeHostAdminProhibited: "HostAdminProhibited", + ICMPv4CodeNetTOS: "NetTOS", + ICMPv4CodeHostTOS: "HostTOS", + ICMPv4CodeCommAdminProhibited: "CommAdminProhibited", + ICMPv4CodeHostPrecedence: "HostPrecedence", + ICMPv4CodePrecedenceCutoff: "PrecedenceCutoff", + }, + }, + ICMPv4TypeTimeExceeded: icmpv4TypeCodeInfoStruct{ + "TimeExceeded", &map[uint8]string{ + ICMPv4CodeTTLExceeded: "TTLExceeded", + ICMPv4CodeFragmentReassemblyTimeExceeded: "FragmentReassemblyTimeExceeded", + }, + }, + ICMPv4TypeParameterProblem: icmpv4TypeCodeInfoStruct{ + "ParameterProblem", &map[uint8]string{ + ICMPv4CodePointerIndicatesError: "PointerIndicatesError", + ICMPv4CodeMissingOption: 
"MissingOption", + ICMPv4CodeBadLength: "BadLength", + }, + }, + ICMPv4TypeSourceQuench: icmpv4TypeCodeInfoStruct{ + "SourceQuench", nil, + }, + ICMPv4TypeRedirect: icmpv4TypeCodeInfoStruct{ + "Redirect", &map[uint8]string{ + ICMPv4CodeNet: "Net", + ICMPv4CodeHost: "Host", + ICMPv4CodeTOSNet: "TOS+Net", + ICMPv4CodeTOSHost: "TOS+Host", + }, + }, + ICMPv4TypeEchoRequest: icmpv4TypeCodeInfoStruct{ + "EchoRequest", nil, + }, + ICMPv4TypeEchoReply: icmpv4TypeCodeInfoStruct{ + "EchoReply", nil, + }, + ICMPv4TypeTimestampRequest: icmpv4TypeCodeInfoStruct{ + "TimestampRequest", nil, + }, + ICMPv4TypeTimestampReply: icmpv4TypeCodeInfoStruct{ + "TimestampReply", nil, + }, + ICMPv4TypeInfoRequest: icmpv4TypeCodeInfoStruct{ + "InfoRequest", nil, + }, + ICMPv4TypeInfoReply: icmpv4TypeCodeInfoStruct{ + "InfoReply", nil, + }, + ICMPv4TypeRouterSolicitation: icmpv4TypeCodeInfoStruct{ + "RouterSolicitation", nil, + }, + ICMPv4TypeRouterAdvertisement: icmpv4TypeCodeInfoStruct{ + "RouterAdvertisement", nil, + }, + ICMPv4TypeAddressMaskRequest: icmpv4TypeCodeInfoStruct{ + "AddressMaskRequest", nil, + }, + ICMPv4TypeAddressMaskReply: icmpv4TypeCodeInfoStruct{ + "AddressMaskReply", nil, + }, + } +) + +type ICMPv4TypeCode uint16 + +// Type returns the ICMPv4 type field. +func (a ICMPv4TypeCode) Type() uint8 { + return uint8(a >> 8) +} + +// Code returns the ICMPv4 code field. +func (a ICMPv4TypeCode) Code() uint8 { + return uint8(a) +} + +func (a ICMPv4TypeCode) String() string { + t, c := a.Type(), a.Code() + strInfo, ok := icmpv4TypeCodeInfo[t] + if !ok { + // Unknown ICMPv4 type field + return fmt.Sprintf("%d(%d)", t, c) + } + typeStr := strInfo.typeStr + if strInfo.codeStr == nil && c == 0 { + // The ICMPv4 type does not make use of the code field + return fmt.Sprintf("%s", strInfo.typeStr) + } + if strInfo.codeStr == nil && c != 0 { + // The ICMPv4 type does not make use of the code field, but it is present anyway + return fmt.Sprintf("%s(Code: %d)", typeStr, c) + } + codeStr, ok := (*strInfo.codeStr)[c] + if !ok { + // We don't know this ICMPv4 code; print the numerical value + return fmt.Sprintf("%s(Code: %d)", typeStr, c) + } + return fmt.Sprintf("%s(%s)", typeStr, codeStr) +} + +func (a ICMPv4TypeCode) GoString() string { + t := reflect.TypeOf(a) + return fmt.Sprintf("%s(%d, %d)", t.String(), a.Type(), a.Code()) +} + +// SerializeTo writes the ICMPv4TypeCode value to the 'bytes' buffer. +func (a ICMPv4TypeCode) SerializeTo(bytes []byte) { + binary.BigEndian.PutUint16(bytes, uint16(a)) +} + +// CreateICMPv4TypeCode is a convenience function to create an ICMPv4TypeCode +// gopacket type from the ICMPv4 type and code values. +func CreateICMPv4TypeCode(typ uint8, code uint8) ICMPv4TypeCode { + return ICMPv4TypeCode(binary.BigEndian.Uint16([]byte{typ, code})) +} + +// ICMPv4 is the layer for IPv4 ICMP packet data. +type ICMPv4 struct { + BaseLayer + TypeCode ICMPv4TypeCode + Checksum uint16 + Id uint16 + Seq uint16 +} + +// LayerType returns LayerTypeICMPv4. +func (i *ICMPv4) LayerType() gopacket.LayerType { return LayerTypeICMPv4 } + +// DecodeFromBytes decodes the given bytes into this layer. 
+func (i *ICMPv4) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 8 { + df.SetTruncated() + return errors.New("ICMP layer less then 8 bytes for ICMPv4 packet") + } + i.TypeCode = CreateICMPv4TypeCode(data[0], data[1]) + i.Checksum = binary.BigEndian.Uint16(data[2:4]) + i.Id = binary.BigEndian.Uint16(data[4:6]) + i.Seq = binary.BigEndian.Uint16(data[6:8]) + i.BaseLayer = BaseLayer{data[:8], data[8:]} + return nil +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (i *ICMPv4) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + bytes, err := b.PrependBytes(8) + if err != nil { + return err + } + i.TypeCode.SerializeTo(bytes) + binary.BigEndian.PutUint16(bytes[4:], i.Id) + binary.BigEndian.PutUint16(bytes[6:], i.Seq) + if opts.ComputeChecksums { + bytes[2] = 0 + bytes[3] = 0 + i.Checksum = tcpipChecksum(b.Bytes(), 0) + } + binary.BigEndian.PutUint16(bytes[2:], i.Checksum) + return nil +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (i *ICMPv4) CanDecode() gopacket.LayerClass { + return LayerTypeICMPv4 +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (i *ICMPv4) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypePayload +} + +func decodeICMPv4(data []byte, p gopacket.PacketBuilder) error { + i := &ICMPv4{} + return decodingLayerDecoder(i, data, p) +} diff --git a/vendor/github.com/google/gopacket/layers/icmp6.go b/vendor/github.com/google/gopacket/layers/icmp6.go new file mode 100644 index 0000000000..09afd11a60 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/icmp6.go @@ -0,0 +1,266 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// Copyright 2009-2011 Andreas Krennmair. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. 
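// --- Editorial example (not vendored source; it refers to ICMPv4.SerializeTo at the
// end of the icmp4.go hunk above, before this icmp6.go header) ---
// A hedged sketch of building an echo request and letting
// SerializeOptions.ComputeChecksums fill in the ICMPv4 checksum. Assumed imports:
// "github.com/google/gopacket", "github.com/google/gopacket/layers".
func exampleSerializeICMPv4() error {
    icmp := &layers.ICMPv4{
        TypeCode: layers.CreateICMPv4TypeCode(layers.ICMPv4TypeEchoRequest, 0),
        Id:       1,
        Seq:      1,
    }
    buf := gopacket.NewSerializeBuffer()
    opts := gopacket.SerializeOptions{ComputeChecksums: true}
    // SerializeLayers works back to front, so the payload is already in the buffer
    // when ICMPv4.SerializeTo computes the checksum over b.Bytes().
    return gopacket.SerializeLayers(buf, opts, icmp, gopacket.Payload("ping"))
}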
+ +package layers + +import ( + "encoding/binary" + "errors" + "fmt" + "reflect" + + "github.com/google/gopacket" +) + +const ( + // The following are from RFC 4443 + ICMPv6TypeDestinationUnreachable = 1 + ICMPv6TypePacketTooBig = 2 + ICMPv6TypeTimeExceeded = 3 + ICMPv6TypeParameterProblem = 4 + ICMPv6TypeEchoRequest = 128 + ICMPv6TypeEchoReply = 129 + + // The following are from RFC 4861 + ICMPv6TypeRouterSolicitation = 133 + ICMPv6TypeRouterAdvertisement = 134 + ICMPv6TypeNeighborSolicitation = 135 + ICMPv6TypeNeighborAdvertisement = 136 + ICMPv6TypeRedirect = 137 + + // The following are from RFC 2710 + ICMPv6TypeMLDv1MulticastListenerQueryMessage = 130 + ICMPv6TypeMLDv1MulticastListenerReportMessage = 131 + ICMPv6TypeMLDv1MulticastListenerDoneMessage = 132 + + // The following are from RFC 3810 + ICMPv6TypeMLDv2MulticastListenerReportMessageV2 = 143 +) + +const ( + // DestinationUnreachable + ICMPv6CodeNoRouteToDst = 0 + ICMPv6CodeAdminProhibited = 1 + ICMPv6CodeBeyondScopeOfSrc = 2 + ICMPv6CodeAddressUnreachable = 3 + ICMPv6CodePortUnreachable = 4 + ICMPv6CodeSrcAddressFailedPolicy = 5 + ICMPv6CodeRejectRouteToDst = 6 + + // TimeExceeded + ICMPv6CodeHopLimitExceeded = 0 + ICMPv6CodeFragmentReassemblyTimeExceeded = 1 + + // ParameterProblem + ICMPv6CodeErroneousHeaderField = 0 + ICMPv6CodeUnrecognizedNextHeader = 1 + ICMPv6CodeUnrecognizedIPv6Option = 2 +) + +type icmpv6TypeCodeInfoStruct struct { + typeStr string + codeStr *map[uint8]string +} + +var ( + icmpv6TypeCodeInfo = map[uint8]icmpv6TypeCodeInfoStruct{ + ICMPv6TypeDestinationUnreachable: icmpv6TypeCodeInfoStruct{ + "DestinationUnreachable", &map[uint8]string{ + ICMPv6CodeNoRouteToDst: "NoRouteToDst", + ICMPv6CodeAdminProhibited: "AdminProhibited", + ICMPv6CodeBeyondScopeOfSrc: "BeyondScopeOfSrc", + ICMPv6CodeAddressUnreachable: "AddressUnreachable", + ICMPv6CodePortUnreachable: "PortUnreachable", + ICMPv6CodeSrcAddressFailedPolicy: "SrcAddressFailedPolicy", + ICMPv6CodeRejectRouteToDst: "RejectRouteToDst", + }, + }, + ICMPv6TypePacketTooBig: icmpv6TypeCodeInfoStruct{ + "PacketTooBig", nil, + }, + ICMPv6TypeTimeExceeded: icmpv6TypeCodeInfoStruct{ + "TimeExceeded", &map[uint8]string{ + ICMPv6CodeHopLimitExceeded: "HopLimitExceeded", + ICMPv6CodeFragmentReassemblyTimeExceeded: "FragmentReassemblyTimeExceeded", + }, + }, + ICMPv6TypeParameterProblem: icmpv6TypeCodeInfoStruct{ + "ParameterProblem", &map[uint8]string{ + ICMPv6CodeErroneousHeaderField: "ErroneousHeaderField", + ICMPv6CodeUnrecognizedNextHeader: "UnrecognizedNextHeader", + ICMPv6CodeUnrecognizedIPv6Option: "UnrecognizedIPv6Option", + }, + }, + ICMPv6TypeEchoRequest: icmpv6TypeCodeInfoStruct{ + "EchoRequest", nil, + }, + ICMPv6TypeEchoReply: icmpv6TypeCodeInfoStruct{ + "EchoReply", nil, + }, + ICMPv6TypeRouterSolicitation: icmpv6TypeCodeInfoStruct{ + "RouterSolicitation", nil, + }, + ICMPv6TypeRouterAdvertisement: icmpv6TypeCodeInfoStruct{ + "RouterAdvertisement", nil, + }, + ICMPv6TypeNeighborSolicitation: icmpv6TypeCodeInfoStruct{ + "NeighborSolicitation", nil, + }, + ICMPv6TypeNeighborAdvertisement: icmpv6TypeCodeInfoStruct{ + "NeighborAdvertisement", nil, + }, + ICMPv6TypeRedirect: icmpv6TypeCodeInfoStruct{ + "Redirect", nil, + }, + } +) + +type ICMPv6TypeCode uint16 + +// Type returns the ICMPv6 type field. +func (a ICMPv6TypeCode) Type() uint8 { + return uint8(a >> 8) +} + +// Code returns the ICMPv6 code field. 
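// --- Editorial example (not vendored source) ---
// ICMPv6TypeCode packs the type into the high byte and the code into the low byte,
// so the Type() above and the Code() that follows are a shift and a truncation. A
// hedged sketch using CreateICMPv6TypeCode from later in this hunk; assumed imports:
// "fmt", "github.com/google/gopacket/layers".
func exampleICMPv6TypeCode() {
    tc := layers.CreateICMPv6TypeCode(layers.ICMPv6TypeDestinationUnreachable, layers.ICMPv6CodePortUnreachable)
    fmt.Println(tc.Type(), tc.Code(), tc) // prints: 1 4 DestinationUnreachable(PortUnreachable)
}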
+func (a ICMPv6TypeCode) Code() uint8 { + return uint8(a) +} + +func (a ICMPv6TypeCode) String() string { + t, c := a.Type(), a.Code() + strInfo, ok := icmpv6TypeCodeInfo[t] + if !ok { + // Unknown ICMPv6 type field + return fmt.Sprintf("%d(%d)", t, c) + } + typeStr := strInfo.typeStr + if strInfo.codeStr == nil && c == 0 { + // The ICMPv6 type does not make use of the code field + return fmt.Sprintf("%s", strInfo.typeStr) + } + if strInfo.codeStr == nil && c != 0 { + // The ICMPv6 type does not make use of the code field, but it is present anyway + return fmt.Sprintf("%s(Code: %d)", typeStr, c) + } + codeStr, ok := (*strInfo.codeStr)[c] + if !ok { + // We don't know this ICMPv6 code; print the numerical value + return fmt.Sprintf("%s(Code: %d)", typeStr, c) + } + return fmt.Sprintf("%s(%s)", typeStr, codeStr) +} + +func (a ICMPv6TypeCode) GoString() string { + t := reflect.TypeOf(a) + return fmt.Sprintf("%s(%d, %d)", t.String(), a.Type(), a.Code()) +} + +// SerializeTo writes the ICMPv6TypeCode value to the 'bytes' buffer. +func (a ICMPv6TypeCode) SerializeTo(bytes []byte) { + binary.BigEndian.PutUint16(bytes, uint16(a)) +} + +// CreateICMPv6TypeCode is a convenience function to create an ICMPv6TypeCode +// gopacket type from the ICMPv6 type and code values. +func CreateICMPv6TypeCode(typ uint8, code uint8) ICMPv6TypeCode { + return ICMPv6TypeCode(binary.BigEndian.Uint16([]byte{typ, code})) +} + +// ICMPv6 is the layer for IPv6 ICMP packet data +type ICMPv6 struct { + BaseLayer + TypeCode ICMPv6TypeCode + Checksum uint16 + // TypeBytes is deprecated and always nil. See the different ICMPv6 message types + // instead (e.g. ICMPv6TypeRouterSolicitation). + TypeBytes []byte + tcpipchecksum +} + +// LayerType returns LayerTypeICMPv6. +func (i *ICMPv6) LayerType() gopacket.LayerType { return LayerTypeICMPv6 } + +// DecodeFromBytes decodes the given bytes into this layer. +func (i *ICMPv6) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 4 { + df.SetTruncated() + return errors.New("ICMP layer less then 4 bytes for ICMPv6 packet") + } + i.TypeCode = CreateICMPv6TypeCode(data[0], data[1]) + i.Checksum = binary.BigEndian.Uint16(data[2:4]) + i.BaseLayer = BaseLayer{data[:4], data[4:]} + return nil +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (i *ICMPv6) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + bytes, err := b.PrependBytes(4) + if err != nil { + return err + } + i.TypeCode.SerializeTo(bytes) + + if opts.ComputeChecksums { + bytes[2] = 0 + bytes[3] = 0 + csum, err := i.computeChecksum(b.Bytes(), IPProtocolICMPv6) + if err != nil { + return err + } + i.Checksum = csum + } + binary.BigEndian.PutUint16(bytes[2:], i.Checksum) + + return nil +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (i *ICMPv6) CanDecode() gopacket.LayerClass { + return LayerTypeICMPv6 +} + +// NextLayerType returns the layer type contained by this DecodingLayer. 
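// --- Editorial example (not vendored source) ---
// The NextLayerType switch that follows hands echo messages off to the ICMPv6Echo
// layer from icmp6msg.go, so one NewPacket call yields both layers. A hedged sketch;
// assumed imports: "fmt", "github.com/google/gopacket", "github.com/google/gopacket/layers".
func exampleDecodeICMPv6Echo() {
    // type=128 (EchoRequest), code=0, checksum (not verified on decode), id=1, seq=1.
    raw := []byte{0x80, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01}
    pkt := gopacket.NewPacket(raw, layers.LayerTypeICMPv6, gopacket.Default)
    if l := pkt.Layer(layers.LayerTypeICMPv6Echo); l != nil {
        echo := l.(*layers.ICMPv6Echo)
        fmt.Println(echo.Identifier, echo.SeqNumber) // prints: 1 1
    }
}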
+func (i *ICMPv6) NextLayerType() gopacket.LayerType { + switch i.TypeCode.Type() { + case ICMPv6TypeEchoRequest: + return LayerTypeICMPv6Echo + case ICMPv6TypeEchoReply: + return LayerTypeICMPv6Echo + case ICMPv6TypeRouterSolicitation: + return LayerTypeICMPv6RouterSolicitation + case ICMPv6TypeRouterAdvertisement: + return LayerTypeICMPv6RouterAdvertisement + case ICMPv6TypeNeighborSolicitation: + return LayerTypeICMPv6NeighborSolicitation + case ICMPv6TypeNeighborAdvertisement: + return LayerTypeICMPv6NeighborAdvertisement + case ICMPv6TypeRedirect: + return LayerTypeICMPv6Redirect + case ICMPv6TypeMLDv1MulticastListenerQueryMessage: // Same Code for MLDv1 Query and MLDv2 Query + if len(i.Payload) > 20 { // Only payload size differs + return LayerTypeMLDv2MulticastListenerQuery + } else { + return LayerTypeMLDv1MulticastListenerQuery + } + case ICMPv6TypeMLDv1MulticastListenerDoneMessage: + return LayerTypeMLDv1MulticastListenerDone + case ICMPv6TypeMLDv1MulticastListenerReportMessage: + return LayerTypeMLDv1MulticastListenerReport + case ICMPv6TypeMLDv2MulticastListenerReportMessageV2: + return LayerTypeMLDv2MulticastListenerReport + } + + return gopacket.LayerTypePayload +} + +func decodeICMPv6(data []byte, p gopacket.PacketBuilder) error { + i := &ICMPv6{} + return decodingLayerDecoder(i, data, p) +} diff --git a/vendor/github.com/google/gopacket/layers/icmp6msg.go b/vendor/github.com/google/gopacket/layers/icmp6msg.go new file mode 100644 index 0000000000..d9268db056 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/icmp6msg.go @@ -0,0 +1,578 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// Copyright 2009-2011 Andreas Krennmair. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "net" + "time" + + "github.com/google/gopacket" +) + +// Based on RFC 4861 + +// ICMPv6Opt indicate how to decode the data associated with each ICMPv6Option. +type ICMPv6Opt uint8 + +const ( + _ ICMPv6Opt = iota + + // ICMPv6OptSourceAddress contains the link-layer address of the sender of + // the packet. It is used in the Neighbor Solicitation, Router + // Solicitation, and Router Advertisement packets. Must be ignored for other + // Neighbor discovery messages. + ICMPv6OptSourceAddress + + // ICMPv6OptTargetAddress contains the link-layer address of the target. It + // is used in Neighbor Advertisement and Redirect packets. Must be ignored + // for other Neighbor discovery messages. + ICMPv6OptTargetAddress + + // ICMPv6OptPrefixInfo provides hosts with on-link prefixes and prefixes + // for Address Autoconfiguration. The Prefix Information option appears in + // Router Advertisement packets and MUST be silently ignored for other + // messages. + ICMPv6OptPrefixInfo + + // ICMPv6OptRedirectedHeader is used in Redirect messages and contains all + // or part of the packet that is being redirected. + ICMPv6OptRedirectedHeader + + // ICMPv6OptMTU is used in Router Advertisement messages to ensure that all + // nodes on a link use the same MTU value in those cases where the link MTU + // is not well known. This option MUST be silently ignored for other + // Neighbor Discovery messages. + ICMPv6OptMTU +) + +// ICMPv6Echo represents the structure of a ping. 
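// --- Editorial example (not vendored source) ---
// Unlike ICMPv4, the ICMPv6 checksum covers an IPv6 pseudo-header, which is why the
// ICMPv6 layer in the icmp6.go hunk above embeds tcpipchecksum. A hedged sketch of
// serializing an echo request (using the ICMPv6Echo type defined just below); it
// assumes the IPv6 layer from the same vendored package and the imports "net",
// "github.com/google/gopacket", "github.com/google/gopacket/layers".
func exampleSerializeICMPv6Echo() error {
    ip6 := &layers.IPv6{
        Version:    6,
        HopLimit:   64,
        NextHeader: layers.IPProtocolICMPv6,
        SrcIP:      net.ParseIP("fe80::1"),
        DstIP:      net.ParseIP("fe80::2"),
    }
    icmp := &layers.ICMPv6{TypeCode: layers.CreateICMPv6TypeCode(layers.ICMPv6TypeEchoRequest, 0)}
    echo := &layers.ICMPv6Echo{Identifier: 1, SeqNumber: 1}
    if err := icmp.SetNetworkLayerForChecksum(ip6); err != nil { // checksum needs the pseudo-header
        return err
    }
    buf := gopacket.NewSerializeBuffer()
    opts := gopacket.SerializeOptions{FixLengths: true, ComputeChecksums: true}
    return gopacket.SerializeLayers(buf, opts, ip6, icmp, echo)
}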
+type ICMPv6Echo struct { + BaseLayer + Identifier uint16 + SeqNumber uint16 +} + +// ICMPv6RouterSolicitation is sent by hosts to find routers. +type ICMPv6RouterSolicitation struct { + BaseLayer + Options ICMPv6Options +} + +// ICMPv6RouterAdvertisement is sent by routers in response to Solicitation. +type ICMPv6RouterAdvertisement struct { + BaseLayer + HopLimit uint8 + Flags uint8 + RouterLifetime uint16 + ReachableTime uint32 + RetransTimer uint32 + Options ICMPv6Options +} + +// ICMPv6NeighborSolicitation is sent to request the link-layer address of a +// target node. +type ICMPv6NeighborSolicitation struct { + BaseLayer + TargetAddress net.IP + Options ICMPv6Options +} + +// ICMPv6NeighborAdvertisement is sent by nodes in response to Solicitation. +type ICMPv6NeighborAdvertisement struct { + BaseLayer + Flags uint8 + TargetAddress net.IP + Options ICMPv6Options +} + +// ICMPv6Redirect is sent by routers to inform hosts of a better first-hop node +// on the path to a destination. +type ICMPv6Redirect struct { + BaseLayer + TargetAddress net.IP + DestinationAddress net.IP + Options ICMPv6Options +} + +// ICMPv6Option contains the type and data for a single option. +type ICMPv6Option struct { + Type ICMPv6Opt + Data []byte +} + +// ICMPv6Options is a slice of ICMPv6Option. +type ICMPv6Options []ICMPv6Option + +func (i ICMPv6Opt) String() string { + switch i { + case ICMPv6OptSourceAddress: + return "SourceAddress" + case ICMPv6OptTargetAddress: + return "TargetAddress" + case ICMPv6OptPrefixInfo: + return "PrefixInfo" + case ICMPv6OptRedirectedHeader: + return "RedirectedHeader" + case ICMPv6OptMTU: + return "MTU" + default: + return fmt.Sprintf("Unknown(%d)", i) + } +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (i *ICMPv6Echo) CanDecode() gopacket.LayerClass { + return LayerTypeICMPv6Echo +} + +// LayerType returns LayerTypeICMPv6Echo. +func (i *ICMPv6Echo) LayerType() gopacket.LayerType { + return LayerTypeICMPv6Echo +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (i *ICMPv6Echo) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypePayload +} + +// DecodeFromBytes decodes the given bytes into this layer. +func (i *ICMPv6Echo) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 4 { + df.SetTruncated() + return errors.New("ICMP layer less then 4 bytes for ICMPv6 Echo") + } + i.Identifier = binary.BigEndian.Uint16(data[0:2]) + i.SeqNumber = binary.BigEndian.Uint16(data[2:4]) + + return nil +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (i *ICMPv6Echo) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + buf, err := b.PrependBytes(4) + if err != nil { + return err + } + + binary.BigEndian.PutUint16(buf, i.Identifier) + binary.BigEndian.PutUint16(buf[2:], i.SeqNumber) + return nil +} + +// LayerType returns LayerTypeICMPv6. +func (i *ICMPv6RouterSolicitation) LayerType() gopacket.LayerType { + return LayerTypeICMPv6RouterSolicitation +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (i *ICMPv6RouterSolicitation) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypePayload +} + +// DecodeFromBytes decodes the given bytes into this layer. 
+func (i *ICMPv6RouterSolicitation) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + // first 4 bytes are reserved followed by options + if len(data) < 4 { + df.SetTruncated() + return errors.New("ICMP layer less then 4 bytes for ICMPv6 router solicitation") + } + + // truncate old options + i.Options = i.Options[:0] + + return i.Options.DecodeFromBytes(data[4:], df) +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (i *ICMPv6RouterSolicitation) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + if err := i.Options.SerializeTo(b, opts); err != nil { + return err + } + + buf, err := b.PrependBytes(4) + if err != nil { + return err + } + + copy(buf, lotsOfZeros[:4]) + return nil +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (i *ICMPv6RouterSolicitation) CanDecode() gopacket.LayerClass { + return LayerTypeICMPv6RouterSolicitation +} + +// LayerType returns LayerTypeICMPv6RouterAdvertisement. +func (i *ICMPv6RouterAdvertisement) LayerType() gopacket.LayerType { + return LayerTypeICMPv6RouterAdvertisement +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (i *ICMPv6RouterAdvertisement) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypePayload +} + +// DecodeFromBytes decodes the given bytes into this layer. +func (i *ICMPv6RouterAdvertisement) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 12 { + df.SetTruncated() + return errors.New("ICMP layer less then 12 bytes for ICMPv6 router advertisement") + } + + i.HopLimit = uint8(data[0]) + // M, O bit followed by 6 reserved bits + i.Flags = uint8(data[1]) + i.RouterLifetime = binary.BigEndian.Uint16(data[2:4]) + i.ReachableTime = binary.BigEndian.Uint32(data[4:8]) + i.RetransTimer = binary.BigEndian.Uint32(data[8:12]) + i.BaseLayer = BaseLayer{data, nil} // assume no payload + + // truncate old options + i.Options = i.Options[:0] + + return i.Options.DecodeFromBytes(data[12:], df) +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (i *ICMPv6RouterAdvertisement) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + if err := i.Options.SerializeTo(b, opts); err != nil { + return err + } + + buf, err := b.PrependBytes(12) + if err != nil { + return err + } + + buf[0] = byte(i.HopLimit) + buf[1] = byte(i.Flags) + binary.BigEndian.PutUint16(buf[2:], i.RouterLifetime) + binary.BigEndian.PutUint32(buf[4:], i.ReachableTime) + binary.BigEndian.PutUint32(buf[8:], i.RetransTimer) + return nil +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (i *ICMPv6RouterAdvertisement) CanDecode() gopacket.LayerClass { + return LayerTypeICMPv6RouterAdvertisement +} + +// ManagedAddressConfig is true when addresses are available via DHCPv6. If +// set, the OtherConfig flag is redundant. +func (i *ICMPv6RouterAdvertisement) ManagedAddressConfig() bool { + return i.Flags&0x80 != 0 +} + +// OtherConfig is true when there is other configuration information available +// via DHCPv6. For example, DNS-related information. 
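// --- Editorial example (not vendored source) ---
// The M and O bits live in the top two bits of ICMPv6RouterAdvertisement.Flags, as the
// two accessors around this point show. A tiny hedged sketch; assumed imports: "fmt",
// "github.com/google/gopacket/layers".
func exampleRouterAdvertisementFlags() {
    ra := &layers.ICMPv6RouterAdvertisement{Flags: 0xC0} // 0x80 = Managed, 0x40 = Other
    fmt.Println(ra.ManagedAddressConfig(), ra.OtherConfig()) // prints: true true
}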
+func (i *ICMPv6RouterAdvertisement) OtherConfig() bool { + return i.Flags&0x40 != 0 +} + +// LayerType returns LayerTypeICMPv6NeighborSolicitation. +func (i *ICMPv6NeighborSolicitation) LayerType() gopacket.LayerType { + return LayerTypeICMPv6NeighborSolicitation +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (i *ICMPv6NeighborSolicitation) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypePayload +} + +// DecodeFromBytes decodes the given bytes into this layer. +func (i *ICMPv6NeighborSolicitation) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 20 { + df.SetTruncated() + return errors.New("ICMP layer less then 20 bytes for ICMPv6 neighbor solicitation") + } + + i.TargetAddress = net.IP(data[4:20]) + i.BaseLayer = BaseLayer{data, nil} // assume no payload + + // truncate old options + i.Options = i.Options[:0] + + return i.Options.DecodeFromBytes(data[20:], df) +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (i *ICMPv6NeighborSolicitation) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + if err := i.Options.SerializeTo(b, opts); err != nil { + return err + } + + buf, err := b.PrependBytes(20) + if err != nil { + return err + } + + copy(buf, lotsOfZeros[:4]) + copy(buf[4:], i.TargetAddress) + return nil +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (i *ICMPv6NeighborSolicitation) CanDecode() gopacket.LayerClass { + return LayerTypeICMPv6NeighborSolicitation +} + +// LayerType returns LayerTypeICMPv6NeighborAdvertisement. +func (i *ICMPv6NeighborAdvertisement) LayerType() gopacket.LayerType { + return LayerTypeICMPv6NeighborAdvertisement +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (i *ICMPv6NeighborAdvertisement) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypePayload +} + +// DecodeFromBytes decodes the given bytes into this layer. +func (i *ICMPv6NeighborAdvertisement) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 20 { + df.SetTruncated() + return errors.New("ICMP layer less then 20 bytes for ICMPv6 neighbor advertisement") + } + + i.Flags = uint8(data[0]) + i.TargetAddress = net.IP(data[4:20]) + i.BaseLayer = BaseLayer{data, nil} // assume no payload + + // truncate old options + i.Options = i.Options[:0] + + return i.Options.DecodeFromBytes(data[20:], df) +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (i *ICMPv6NeighborAdvertisement) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + if err := i.Options.SerializeTo(b, opts); err != nil { + return err + } + + buf, err := b.PrependBytes(20) + if err != nil { + return err + } + + buf[0] = byte(i.Flags) + copy(buf[1:], lotsOfZeros[:3]) + copy(buf[4:], i.TargetAddress) + return nil +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (i *ICMPv6NeighborAdvertisement) CanDecode() gopacket.LayerClass { + return LayerTypeICMPv6NeighborAdvertisement +} + +// Router indicates whether the sender is a router or not. 
+func (i *ICMPv6NeighborAdvertisement) Router() bool { + return i.Flags&0x80 != 0 +} + +// Solicited indicates whether the advertisement was solicited or not. +func (i *ICMPv6NeighborAdvertisement) Solicited() bool { + return i.Flags&0x40 != 0 +} + +// Override indicates whether the advertisement should Override an existing +// cache entry. +func (i *ICMPv6NeighborAdvertisement) Override() bool { + return i.Flags&0x20 != 0 +} + +// LayerType returns LayerTypeICMPv6Redirect. +func (i *ICMPv6Redirect) LayerType() gopacket.LayerType { + return LayerTypeICMPv6Redirect +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (i *ICMPv6Redirect) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypePayload +} + +// DecodeFromBytes decodes the given bytes into this layer. +func (i *ICMPv6Redirect) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 36 { + df.SetTruncated() + return errors.New("ICMP layer less then 36 bytes for ICMPv6 redirect") + } + + i.TargetAddress = net.IP(data[4:20]) + i.DestinationAddress = net.IP(data[20:36]) + i.BaseLayer = BaseLayer{data, nil} // assume no payload + + // truncate old options + i.Options = i.Options[:0] + + return i.Options.DecodeFromBytes(data[36:], df) +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (i *ICMPv6Redirect) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + if err := i.Options.SerializeTo(b, opts); err != nil { + return err + } + + buf, err := b.PrependBytes(36) + if err != nil { + return err + } + + copy(buf, lotsOfZeros[:4]) + copy(buf[4:], i.TargetAddress) + copy(buf[20:], i.DestinationAddress) + return nil +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (i *ICMPv6Redirect) CanDecode() gopacket.LayerClass { + return LayerTypeICMPv6Redirect +} + +func (i ICMPv6Option) String() string { + hd := hex.EncodeToString(i.Data) + if len(hd) > 0 { + hd = " 0x" + hd + } + + switch i.Type { + case ICMPv6OptSourceAddress, ICMPv6OptTargetAddress: + return fmt.Sprintf("ICMPv6Option(%s:%v)", + i.Type, + net.HardwareAddr(i.Data)) + case ICMPv6OptPrefixInfo: + if len(i.Data) == 30 { + prefixLen := uint8(i.Data[0]) + onLink := (i.Data[1]&0x80 != 0) + autonomous := (i.Data[1]&0x40 != 0) + validLifetime := time.Duration(binary.BigEndian.Uint32(i.Data[2:6])) * time.Second + preferredLifetime := time.Duration(binary.BigEndian.Uint32(i.Data[6:10])) * time.Second + + prefix := net.IP(i.Data[14:]) + + return fmt.Sprintf("ICMPv6Option(%s:%v/%v:%t:%t:%v:%v)", + i.Type, + prefix, prefixLen, + onLink, autonomous, + validLifetime, preferredLifetime) + } + case ICMPv6OptRedirectedHeader: + // could invoke IP decoder on data... probably best not to + break + case ICMPv6OptMTU: + if len(i.Data) == 6 { + return fmt.Sprintf("ICMPv6Option(%s:%v)", + i.Type, + binary.BigEndian.Uint32(i.Data[2:])) + } + + } + return fmt.Sprintf("ICMPv6Option(%s:%s)", i.Type, hd) +} + +// DecodeFromBytes decodes the given bytes into this layer. 
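// --- Editorial example (not vendored source) ---
// ICMPv6 option lengths are expressed in units of 8 octets, so a source link-layer
// address option is type 1, length 1, then a 6-byte MAC (see the ICMPv6Options
// DecodeFromBytes that follows). A hedged sketch; assumed imports: "fmt", "net",
// "github.com/google/gopacket", "github.com/google/gopacket/layers".
func exampleICMPv6Options() {
    raw := []byte{0x01, 0x01, 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01}
    var opts layers.ICMPv6Options
    if err := opts.DecodeFromBytes(raw, gopacket.NilDecodeFeedback); err == nil {
        fmt.Println(opts[0].Type, net.HardwareAddr(opts[0].Data)) // prints: SourceAddress de:ad:be:ef:00:01
    }
}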
+func (i *ICMPv6Options) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + for len(data) > 0 { + if len(data) < 2 { + df.SetTruncated() + return errors.New("ICMP layer less then 2 bytes for ICMPv6 message option") + } + + // unit is 8 octets, convert to bytes + length := int(data[1]) * 8 + + if length == 0 { + df.SetTruncated() + return errors.New("ICMPv6 message option with length 0") + } + + if len(data) < length { + df.SetTruncated() + return fmt.Errorf("ICMP layer only %v bytes for ICMPv6 message option with length %v", len(data), length) + } + + o := ICMPv6Option{ + Type: ICMPv6Opt(data[0]), + Data: data[2:length], + } + + // chop off option we just consumed + data = data[length:] + + *i = append(*i, o) + } + + return nil +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (i *ICMPv6Options) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + for _, opt := range []ICMPv6Option(*i) { + length := len(opt.Data) + 2 + buf, err := b.PrependBytes(length) + if err != nil { + return err + } + + buf[0] = byte(opt.Type) + buf[1] = byte(length / 8) + copy(buf[2:], opt.Data) + } + + return nil +} + +func decodeICMPv6Echo(data []byte, p gopacket.PacketBuilder) error { + i := &ICMPv6Echo{} + return decodingLayerDecoder(i, data, p) +} + +func decodeICMPv6RouterSolicitation(data []byte, p gopacket.PacketBuilder) error { + i := &ICMPv6RouterSolicitation{} + return decodingLayerDecoder(i, data, p) +} + +func decodeICMPv6RouterAdvertisement(data []byte, p gopacket.PacketBuilder) error { + i := &ICMPv6RouterAdvertisement{} + return decodingLayerDecoder(i, data, p) +} + +func decodeICMPv6NeighborSolicitation(data []byte, p gopacket.PacketBuilder) error { + i := &ICMPv6NeighborSolicitation{} + return decodingLayerDecoder(i, data, p) +} + +func decodeICMPv6NeighborAdvertisement(data []byte, p gopacket.PacketBuilder) error { + i := &ICMPv6NeighborAdvertisement{} + return decodingLayerDecoder(i, data, p) +} + +func decodeICMPv6Redirect(data []byte, p gopacket.PacketBuilder) error { + i := &ICMPv6Redirect{} + return decodingLayerDecoder(i, data, p) +} diff --git a/vendor/github.com/google/gopacket/layers/igmp.go b/vendor/github.com/google/gopacket/layers/igmp.go new file mode 100644 index 0000000000..d00841535b --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/igmp.go @@ -0,0 +1,355 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// Copyright 2009-2011 Andreas Krennmair. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. 
+ +package layers + +import ( + "encoding/binary" + "errors" + "net" + "time" + + "github.com/google/gopacket" +) + +type IGMPType uint8 + +const ( + IGMPMembershipQuery IGMPType = 0x11 // General or group specific query + IGMPMembershipReportV1 IGMPType = 0x12 // Version 1 Membership Report + IGMPMembershipReportV2 IGMPType = 0x16 // Version 2 Membership Report + IGMPLeaveGroup IGMPType = 0x17 // Leave Group + IGMPMembershipReportV3 IGMPType = 0x22 // Version 3 Membership Report +) + +// String conversions for IGMP message types +func (i IGMPType) String() string { + switch i { + case IGMPMembershipQuery: + return "IGMP Membership Query" + case IGMPMembershipReportV1: + return "IGMPv1 Membership Report" + case IGMPMembershipReportV2: + return "IGMPv2 Membership Report" + case IGMPMembershipReportV3: + return "IGMPv3 Membership Report" + case IGMPLeaveGroup: + return "Leave Group" + default: + return "" + } +} + +type IGMPv3GroupRecordType uint8 + +const ( + IGMPIsIn IGMPv3GroupRecordType = 0x01 // Type MODE_IS_INCLUDE, source addresses x + IGMPIsEx IGMPv3GroupRecordType = 0x02 // Type MODE_IS_EXCLUDE, source addresses x + IGMPToIn IGMPv3GroupRecordType = 0x03 // Type CHANGE_TO_INCLUDE_MODE, source addresses x + IGMPToEx IGMPv3GroupRecordType = 0x04 // Type CHANGE_TO_EXCLUDE_MODE, source addresses x + IGMPAllow IGMPv3GroupRecordType = 0x05 // Type ALLOW_NEW_SOURCES, source addresses x + IGMPBlock IGMPv3GroupRecordType = 0x06 // Type BLOCK_OLD_SOURCES, source addresses x +) + +func (i IGMPv3GroupRecordType) String() string { + switch i { + case IGMPIsIn: + return "MODE_IS_INCLUDE" + case IGMPIsEx: + return "MODE_IS_EXCLUDE" + case IGMPToIn: + return "CHANGE_TO_INCLUDE_MODE" + case IGMPToEx: + return "CHANGE_TO_EXCLUDE_MODE" + case IGMPAllow: + return "ALLOW_NEW_SOURCES" + case IGMPBlock: + return "BLOCK_OLD_SOURCES" + default: + return "" + } +} + +// IGMP represents an IGMPv3 message. +type IGMP struct { + BaseLayer + Type IGMPType + MaxResponseTime time.Duration + Checksum uint16 + GroupAddress net.IP + SupressRouterProcessing bool + RobustnessValue uint8 + IntervalTime time.Duration + SourceAddresses []net.IP + NumberOfGroupRecords uint16 + NumberOfSources uint16 + GroupRecords []IGMPv3GroupRecord + Version uint8 // IGMP protocol version +} + +// IGMPv1or2 stores header details for an IGMPv1 or IGMPv2 packet. +// +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Type | Max Resp Time | Checksum | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Group Address | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +type IGMPv1or2 struct { + BaseLayer + Type IGMPType // IGMP message type + MaxResponseTime time.Duration // meaningful only in Membership Query messages + Checksum uint16 // 16-bit checksum of entire ip payload + GroupAddress net.IP // either 0 or an IP multicast address + Version uint8 +} + +// decodeResponse dissects IGMPv1 or IGMPv2 packet. 
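// --- Editorial example (not vendored source) ---
// The fixed 8-byte IGMPv1/IGMPv2 header described just above is what the decodeIGMP
// dispatcher later in this hunk routes type 0x16 (a v2 membership report) to. A hedged
// sketch; assumed imports: "fmt", "github.com/google/gopacket",
// "github.com/google/gopacket/layers".
func exampleDecodeIGMPv2Report() {
    // IGMPv2 membership report for 224.0.0.1 (checksum left zero; decode does not verify it).
    raw := []byte{0x16, 0x00, 0x00, 0x00, 0xe0, 0x00, 0x00, 0x01}
    pkt := gopacket.NewPacket(raw, layers.LayerTypeIGMP, gopacket.Default)
    if igmp, ok := pkt.Layer(layers.LayerTypeIGMP).(*layers.IGMPv1or2); ok {
        fmt.Println(igmp.Type, igmp.GroupAddress) // prints: IGMPv2 Membership Report 224.0.0.1
    }
}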
+func (i *IGMPv1or2) decodeResponse(data []byte) error { + if len(data) < 8 { + return errors.New("IGMP packet too small") + } + + i.MaxResponseTime = igmpTimeDecode(data[1]) + i.Checksum = binary.BigEndian.Uint16(data[2:4]) + i.GroupAddress = net.IP(data[4:8]) + + return nil +} + +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Type = 0x22 | Reserved | Checksum | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Reserved | Number of Group Records (M) | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// . Group Record [1] . +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// . Group Record [2] . +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// . Group Record [M] . +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Record Type | Aux Data Len | Number of Sources (N) | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Multicast Address | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Source Address [1] | +// +- -+ +// | Source Address [2] | +// +- -+ +// | Source Address [N] | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// . Auxiliary Data . +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +// IGMPv3GroupRecord stores individual group records for a V3 Membership Report message. +type IGMPv3GroupRecord struct { + Type IGMPv3GroupRecordType + AuxDataLen uint8 // this should always be 0 as per IGMPv3 spec. + NumberOfSources uint16 + MulticastAddress net.IP + SourceAddresses []net.IP + AuxData uint32 // NOT USED +} + +func (i *IGMP) decodeIGMPv3MembershipReport(data []byte) error { + if len(data) < 8 { + return errors.New("IGMPv3 Membership Report too small #1") + } + + i.Checksum = binary.BigEndian.Uint16(data[2:4]) + i.NumberOfGroupRecords = binary.BigEndian.Uint16(data[6:8]) + + recordOffset := 8 + for j := 0; j < int(i.NumberOfGroupRecords); j++ { + if len(data) < recordOffset+8 { + return errors.New("IGMPv3 Membership Report too small #2") + } + + var gr IGMPv3GroupRecord + gr.Type = IGMPv3GroupRecordType(data[recordOffset]) + gr.AuxDataLen = data[recordOffset+1] + gr.NumberOfSources = binary.BigEndian.Uint16(data[recordOffset+2 : recordOffset+4]) + gr.MulticastAddress = net.IP(data[recordOffset+4 : recordOffset+8]) + + if len(data) < recordOffset+8+int(gr.NumberOfSources)*4 { + return errors.New("IGMPv3 Membership Report too small #3") + } + + // append source address records. 
+ for i := 0; i < int(gr.NumberOfSources); i++ { + sourceAddr := net.IP(data[recordOffset+8+i*4 : recordOffset+12+i*4]) + gr.SourceAddresses = append(gr.SourceAddresses, sourceAddr) + } + + i.GroupRecords = append(i.GroupRecords, gr) + recordOffset += 8 + 4*int(gr.NumberOfSources) + } + return nil +} + +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Type = 0x11 | Max Resp Code | Checksum | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Group Address | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Resv |S| QRV | QQIC | Number of Sources (N) | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Source Address [1] | +// +- -+ +// | Source Address [2] | +// +- . -+ +// | Source Address [N] | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// +// decodeIGMPv3MembershipQuery parses the IGMPv3 message of type 0x11 +func (i *IGMP) decodeIGMPv3MembershipQuery(data []byte) error { + if len(data) < 12 { + return errors.New("IGMPv3 Membership Query too small #1") + } + + i.MaxResponseTime = igmpTimeDecode(data[1]) + i.Checksum = binary.BigEndian.Uint16(data[2:4]) + i.SupressRouterProcessing = data[8]&0x8 != 0 + i.GroupAddress = net.IP(data[4:8]) + i.RobustnessValue = data[8] & 0x7 + i.IntervalTime = igmpTimeDecode(data[9]) + i.NumberOfSources = binary.BigEndian.Uint16(data[10:12]) + + if len(data) < 12+int(i.NumberOfSources)*4 { + return errors.New("IGMPv3 Membership Query too small #2") + } + + for j := 0; j < int(i.NumberOfSources); j++ { + i.SourceAddresses = append(i.SourceAddresses, net.IP(data[12+j*4:16+j*4])) + } + + return nil +} + +// igmpTimeDecode decodes the duration created by the given byte, using the +// algorithm in http://www.rfc-base.org/txt/rfc-3376.txt section 4.1.1. +func igmpTimeDecode(t uint8) time.Duration { + if t&0x80 == 0 { + return time.Millisecond * 100 * time.Duration(t) + } + mant := (t & 0x70) >> 4 + exp := t & 0x0F + return time.Millisecond * 100 * time.Duration((mant|0x10)<<(exp+3)) +} + +// LayerType returns LayerTypeIGMP for the V1,2,3 message protocol formats. +func (i *IGMP) LayerType() gopacket.LayerType { return LayerTypeIGMP } +func (i *IGMPv1or2) LayerType() gopacket.LayerType { return LayerTypeIGMP } + +func (i *IGMPv1or2) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 8 { + return errors.New("IGMP Packet too small") + } + + i.Type = IGMPType(data[0]) + i.MaxResponseTime = igmpTimeDecode(data[1]) + i.Checksum = binary.BigEndian.Uint16(data[2:4]) + i.GroupAddress = net.IP(data[4:8]) + + return nil +} + +func (i *IGMPv1or2) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypeZero +} + +func (i *IGMPv1or2) CanDecode() gopacket.LayerClass { + return LayerTypeIGMP +} + +// DecodeFromBytes decodes the given bytes into this layer. +func (i *IGMP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 1 { + return errors.New("IGMP packet is too small") + } + + // common IGMP header values between versions 1..3 of IGMP specification.. + i.Type = IGMPType(data[0]) + + switch i.Type { + case IGMPMembershipQuery: + i.decodeIGMPv3MembershipQuery(data) + case IGMPMembershipReportV3: + i.decodeIGMPv3MembershipReport(data) + default: + return errors.New("unsupported IGMP type") + } + + return nil +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. 
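// --- Editorial example (not vendored source) ---
// An IGMPv3 general query is at least 12 bytes (type 0x11, max-resp code, checksum,
// group address, Resv/S/QRV, QQIC, number of sources), which is how the decodeIGMP
// dispatcher below tells it apart from the 8-byte v1/v2 query before calling
// decodeIGMPv3MembershipQuery above. A hedged sketch; assumed imports: "fmt",
// "github.com/google/gopacket", "github.com/google/gopacket/layers".
func exampleDecodeIGMPv3Query() {
    raw := []byte{
        0x11, 0x64, 0x00, 0x00, // type, max resp code, checksum (not verified on decode)
        0x00, 0x00, 0x00, 0x00, // group address 0.0.0.0 = general query
        0x02, 0x7d, 0x00, 0x00, // Resv/S/QRV, QQIC, number of sources = 0
    }
    pkt := gopacket.NewPacket(raw, layers.LayerTypeIGMP, gopacket.Default)
    if igmp, ok := pkt.Layer(layers.LayerTypeIGMP).(*layers.IGMP); ok {
        fmt.Println(igmp.Version, igmp.Type, igmp.RobustnessValue) // prints: 3 IGMP Membership Query 2
    }
}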
+func (i *IGMP) CanDecode() gopacket.LayerClass { + return LayerTypeIGMP +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (i *IGMP) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypeZero +} + +// decodeIGMP will parse IGMP v1,2 or 3 protocols. Checks against the +// IGMP type are performed against byte[0], logic then iniitalizes and +// passes the appropriate struct (IGMP or IGMPv1or2) to +// decodingLayerDecoder. +func decodeIGMP(data []byte, p gopacket.PacketBuilder) error { + if len(data) < 1 { + return errors.New("IGMP packet is too small") + } + + // byte 0 contains IGMP message type. + switch IGMPType(data[0]) { + case IGMPMembershipQuery: + // IGMPv3 Membership Query payload is >= 12 + if len(data) >= 12 { + i := &IGMP{Version: 3} + return decodingLayerDecoder(i, data, p) + } else if len(data) == 8 { + i := &IGMPv1or2{} + if data[1] == 0x00 { + i.Version = 1 // IGMPv1 has a query length of 8 and MaxResp = 0 + } else { + i.Version = 2 // IGMPv2 has a query length of 8 and MaxResp != 0 + } + + return decodingLayerDecoder(i, data, p) + } + case IGMPMembershipReportV3: + i := &IGMP{Version: 3} + return decodingLayerDecoder(i, data, p) + case IGMPMembershipReportV1: + i := &IGMPv1or2{Version: 1} + return decodingLayerDecoder(i, data, p) + case IGMPLeaveGroup, IGMPMembershipReportV2: + // leave group and Query Report v2 used in IGMPv2 only. + i := &IGMPv1or2{Version: 2} + return decodingLayerDecoder(i, data, p) + default: + } + + return errors.New("Unable to determine IGMP type.") +} diff --git a/vendor/github.com/google/gopacket/layers/ip4.go b/vendor/github.com/google/gopacket/layers/ip4.go new file mode 100644 index 0000000000..2b3c0c6bff --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/ip4.go @@ -0,0 +1,325 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// Copyright 2009-2011 Andreas Krennmair. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "errors" + "fmt" + "net" + "strings" + + "github.com/google/gopacket" +) + +type IPv4Flag uint8 + +const ( + IPv4EvilBit IPv4Flag = 1 << 2 // http://tools.ietf.org/html/rfc3514 ;) + IPv4DontFragment IPv4Flag = 1 << 1 + IPv4MoreFragments IPv4Flag = 1 << 0 +) + +func (f IPv4Flag) String() string { + var s []string + if f&IPv4EvilBit != 0 { + s = append(s, "Evil") + } + if f&IPv4DontFragment != 0 { + s = append(s, "DF") + } + if f&IPv4MoreFragments != 0 { + s = append(s, "MF") + } + return strings.Join(s, "|") +} + +// IPv4 is the header of an IP packet. 
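+// Field units follow the IPv4 wire format: IHL counts 32-bit words (so the
+// header is IHL*4 bytes), Length covers header plus payload in bytes, and
+// FragOffset is expressed in 8-byte units.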
+type IPv4 struct { + BaseLayer + Version uint8 + IHL uint8 + TOS uint8 + Length uint16 + Id uint16 + Flags IPv4Flag + FragOffset uint16 + TTL uint8 + Protocol IPProtocol + Checksum uint16 + SrcIP net.IP + DstIP net.IP + Options []IPv4Option + Padding []byte +} + +// LayerType returns LayerTypeIPv4 +func (i *IPv4) LayerType() gopacket.LayerType { return LayerTypeIPv4 } +func (i *IPv4) NetworkFlow() gopacket.Flow { + return gopacket.NewFlow(EndpointIPv4, i.SrcIP, i.DstIP) +} + +type IPv4Option struct { + OptionType uint8 + OptionLength uint8 + OptionData []byte +} + +func (i IPv4Option) String() string { + return fmt.Sprintf("IPv4Option(%v:%v)", i.OptionType, i.OptionData) +} + +// for the current ipv4 options, return the number of bytes (including +// padding that the options used) +func (ip *IPv4) getIPv4OptionSize() uint8 { + optionSize := uint8(0) + for _, opt := range ip.Options { + switch opt.OptionType { + case 0: + // this is the end of option lists + optionSize++ + case 1: + // this is the padding + optionSize++ + default: + optionSize += opt.OptionLength + + } + } + // make sure the options are aligned to 32 bit boundary + if (optionSize % 4) != 0 { + optionSize += 4 - (optionSize % 4) + } + return optionSize +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +func (ip *IPv4) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + optionLength := ip.getIPv4OptionSize() + bytes, err := b.PrependBytes(20 + int(optionLength)) + if err != nil { + return err + } + if opts.FixLengths { + ip.IHL = 5 + (optionLength / 4) + ip.Length = uint16(len(b.Bytes())) + } + bytes[0] = (ip.Version << 4) | ip.IHL + bytes[1] = ip.TOS + binary.BigEndian.PutUint16(bytes[2:], ip.Length) + binary.BigEndian.PutUint16(bytes[4:], ip.Id) + binary.BigEndian.PutUint16(bytes[6:], ip.flagsfrags()) + bytes[8] = ip.TTL + bytes[9] = byte(ip.Protocol) + if err := ip.AddressTo4(); err != nil { + return err + } + copy(bytes[12:16], ip.SrcIP) + copy(bytes[16:20], ip.DstIP) + + curLocation := 20 + // Now, we will encode the options + for _, opt := range ip.Options { + switch opt.OptionType { + case 0: + // this is the end of option lists + bytes[curLocation] = 0 + curLocation++ + case 1: + // this is the padding + bytes[curLocation] = 1 + curLocation++ + default: + bytes[curLocation] = opt.OptionType + bytes[curLocation+1] = opt.OptionLength + + // sanity checking to protect us from buffer overrun + if len(opt.OptionData) > int(opt.OptionLength-2) { + return errors.New("option length is smaller than length of option data") + } + copy(bytes[curLocation+2:curLocation+int(opt.OptionLength)], opt.OptionData) + curLocation += int(opt.OptionLength) + } + } + + if opts.ComputeChecksums { + ip.Checksum = checksum(bytes) + } + binary.BigEndian.PutUint16(bytes[10:], ip.Checksum) + return nil +} + +func checksum(bytes []byte) uint16 { + // Clear checksum bytes + bytes[10] = 0 + bytes[11] = 0 + + // Compute checksum + var csum uint32 + for i := 0; i < len(bytes); i += 2 { + csum += uint32(bytes[i]) << 8 + csum += uint32(bytes[i+1]) + } + for { + // Break when sum is less or equals to 0xFFFF + if csum <= 65535 { + break + } + // Add carry to the sum + csum = (csum >> 16) + uint32(uint16(csum)) + } + // Flip all the bits + return ^uint16(csum) +} + +func (ip *IPv4) flagsfrags() (ff uint16) { + ff |= uint16(ip.Flags) << 13 + ff |= ip.FragOffset + return +} + +// DecodeFromBytes decodes the given bytes into this layer. 
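+// A minimal decoding sketch (raw is an assumed []byte holding an IPv4
+// packet; gopacket.NilDecodeFeedback can be passed when truncation feedback
+// is not needed):
+//
+//	var ip4 IPv4
+//	if err := ip4.DecodeFromBytes(raw, gopacket.NilDecodeFeedback); err != nil {
+//		// handle malformed header
+//	}
+//	fmt.Printf("%s -> %s proto=%s\n", ip4.SrcIP, ip4.DstIP, ip4.Protocol)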
+func (ip *IPv4) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 20 { + df.SetTruncated() + return fmt.Errorf("Invalid ip4 header. Length %d less than 20", len(data)) + } + flagsfrags := binary.BigEndian.Uint16(data[6:8]) + + ip.Version = uint8(data[0]) >> 4 + ip.IHL = uint8(data[0]) & 0x0F + ip.TOS = data[1] + ip.Length = binary.BigEndian.Uint16(data[2:4]) + ip.Id = binary.BigEndian.Uint16(data[4:6]) + ip.Flags = IPv4Flag(flagsfrags >> 13) + ip.FragOffset = flagsfrags & 0x1FFF + ip.TTL = data[8] + ip.Protocol = IPProtocol(data[9]) + ip.Checksum = binary.BigEndian.Uint16(data[10:12]) + ip.SrcIP = data[12:16] + ip.DstIP = data[16:20] + ip.Options = ip.Options[:0] + ip.Padding = nil + // Set up an initial guess for contents/payload... we'll reset these soon. + ip.BaseLayer = BaseLayer{Contents: data} + + // This code is added for the following enviroment: + // * Windows 10 with TSO option activated. ( tested on Hyper-V, RealTek ethernet driver ) + if ip.Length == 0 { + // If using TSO(TCP Segmentation Offload), length is zero. + // The actual packet length is the length of data. + ip.Length = uint16(len(data)) + } + + if ip.Length < 20 { + return fmt.Errorf("Invalid (too small) IP length (%d < 20)", ip.Length) + } else if ip.IHL < 5 { + return fmt.Errorf("Invalid (too small) IP header length (%d < 5)", ip.IHL) + } else if int(ip.IHL*4) > int(ip.Length) { + return fmt.Errorf("Invalid IP header length > IP length (%d > %d)", ip.IHL, ip.Length) + } + if cmp := len(data) - int(ip.Length); cmp > 0 { + data = data[:ip.Length] + } else if cmp < 0 { + df.SetTruncated() + if int(ip.IHL)*4 > len(data) { + return errors.New("Not all IP header bytes available") + } + } + ip.Contents = data[:ip.IHL*4] + ip.Payload = data[ip.IHL*4:] + // From here on, data contains the header options. + data = data[20 : ip.IHL*4] + // Pull out IP options + for len(data) > 0 { + if ip.Options == nil { + // Pre-allocate to avoid growing the slice too much. + ip.Options = make([]IPv4Option, 0, 4) + } + opt := IPv4Option{OptionType: data[0]} + switch opt.OptionType { + case 0: // End of options + opt.OptionLength = 1 + ip.Options = append(ip.Options, opt) + ip.Padding = data[1:] + return nil + case 1: // 1 byte padding + opt.OptionLength = 1 + data = data[1:] + ip.Options = append(ip.Options, opt) + default: + if len(data) < 2 { + df.SetTruncated() + return fmt.Errorf("Invalid ip4 option length. Length %d less than 2", len(data)) + } + opt.OptionLength = data[1] + if len(data) < int(opt.OptionLength) { + df.SetTruncated() + return fmt.Errorf("IP option length exceeds remaining IP header size, option type %v length %v", opt.OptionType, opt.OptionLength) + } + if opt.OptionLength <= 2 { + return fmt.Errorf("Invalid IP option type %v length %d. 
Must be greater than 2", opt.OptionType, opt.OptionLength) + } + opt.OptionData = data[2:opt.OptionLength] + data = data[opt.OptionLength:] + ip.Options = append(ip.Options, opt) + } + } + return nil +} + +func (i *IPv4) CanDecode() gopacket.LayerClass { + return LayerTypeIPv4 +} + +func (i *IPv4) NextLayerType() gopacket.LayerType { + if i.Flags&IPv4MoreFragments != 0 || i.FragOffset != 0 { + return gopacket.LayerTypeFragment + } + return i.Protocol.LayerType() +} + +func decodeIPv4(data []byte, p gopacket.PacketBuilder) error { + ip := &IPv4{} + err := ip.DecodeFromBytes(data, p) + p.AddLayer(ip) + p.SetNetworkLayer(ip) + if err != nil { + return err + } + return p.NextDecoder(ip.NextLayerType()) +} + +func checkIPv4Address(addr net.IP) (net.IP, error) { + if c := addr.To4(); c != nil { + return c, nil + } + if len(addr) == net.IPv6len { + return nil, errors.New("address is IPv6") + } + return nil, fmt.Errorf("wrong length of %d bytes instead of %d", len(addr), net.IPv4len) +} + +func (ip *IPv4) AddressTo4() error { + var src, dst net.IP + + if addr, err := checkIPv4Address(ip.SrcIP); err != nil { + return fmt.Errorf("Invalid source IPv4 address (%s)", err) + } else { + src = addr + } + if addr, err := checkIPv4Address(ip.DstIP); err != nil { + return fmt.Errorf("Invalid destination IPv4 address (%s)", err) + } else { + dst = addr + } + ip.SrcIP = src + ip.DstIP = dst + return nil +} diff --git a/vendor/github.com/google/gopacket/layers/ip6.go b/vendor/github.com/google/gopacket/layers/ip6.go new file mode 100644 index 0000000000..87b9d33d51 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/ip6.go @@ -0,0 +1,722 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// Copyright 2009-2011 Andreas Krennmair. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "errors" + "fmt" + "net" + + "github.com/google/gopacket" +) + +const ( + // IPv6HopByHopOptionJumbogram code as defined in RFC 2675 + IPv6HopByHopOptionJumbogram = 0xC2 +) + +const ( + ipv6MaxPayloadLength = 65535 +) + +// IPv6 is the layer for the IPv6 header. +type IPv6 struct { + // http://www.networksorcery.com/enp/protocol/ipv6.htm + BaseLayer + Version uint8 + TrafficClass uint8 + FlowLabel uint32 + Length uint16 + NextHeader IPProtocol + HopLimit uint8 + SrcIP net.IP + DstIP net.IP + HopByHop *IPv6HopByHop + // hbh will be pointed to by HopByHop if that layer exists. 
+ hbh IPv6HopByHop +} + +// LayerType returns LayerTypeIPv6 +func (ipv6 *IPv6) LayerType() gopacket.LayerType { return LayerTypeIPv6 } + +// NetworkFlow returns this new Flow (EndpointIPv6, SrcIP, DstIP) +func (ipv6 *IPv6) NetworkFlow() gopacket.Flow { + return gopacket.NewFlow(EndpointIPv6, ipv6.SrcIP, ipv6.DstIP) +} + +// Search for Jumbo Payload TLV in IPv6HopByHop and return (length, true) if found +func getIPv6HopByHopJumboLength(hopopts *IPv6HopByHop) (uint32, bool, error) { + var tlv *IPv6HopByHopOption + + for _, t := range hopopts.Options { + if t.OptionType == IPv6HopByHopOptionJumbogram { + tlv = t + break + } + } + if tlv == nil { + // Not found + return 0, false, nil + } + if len(tlv.OptionData) != 4 { + return 0, false, errors.New("Jumbo length TLV data must have length 4") + } + l := binary.BigEndian.Uint32(tlv.OptionData) + if l <= ipv6MaxPayloadLength { + return 0, false, fmt.Errorf("Jumbo length cannot be less than %d", ipv6MaxPayloadLength+1) + } + // Found + return l, true, nil +} + +// Adds zero-valued Jumbo TLV to IPv6 header if it does not exist +// (if necessary add hop-by-hop header) +func addIPv6JumboOption(ip6 *IPv6) { + var tlv *IPv6HopByHopOption + + if ip6.HopByHop == nil { + // Add IPv6 HopByHop + ip6.HopByHop = &IPv6HopByHop{} + ip6.HopByHop.NextHeader = ip6.NextHeader + ip6.HopByHop.HeaderLength = 0 + ip6.NextHeader = IPProtocolIPv6HopByHop + } + for _, t := range ip6.HopByHop.Options { + if t.OptionType == IPv6HopByHopOptionJumbogram { + tlv = t + break + } + } + if tlv == nil { + // Add Jumbo TLV + tlv = &IPv6HopByHopOption{} + ip6.HopByHop.Options = append(ip6.HopByHop.Options, tlv) + } + tlv.SetJumboLength(0) +} + +// Set jumbo length in serialized IPv6 payload (starting with HopByHop header) +func setIPv6PayloadJumboLength(hbh []byte) error { + pLen := len(hbh) + if pLen < 8 { + //HopByHop is minimum 8 bytes + return fmt.Errorf("Invalid IPv6 payload (length %d)", pLen) + } + hbhLen := int((hbh[1] + 1) * 8) + if hbhLen > pLen { + return fmt.Errorf("Invalid hop-by-hop length (length: %d, payload: %d", hbhLen, pLen) + } + offset := 2 //start with options + for offset < hbhLen { + opt := hbh[offset] + if opt == 0 { + //Pad1 + offset++ + continue + } + optLen := int(hbh[offset+1]) + if opt == IPv6HopByHopOptionJumbogram { + if optLen == 4 { + binary.BigEndian.PutUint32(hbh[offset+2:], uint32(pLen)) + return nil + } + return fmt.Errorf("Jumbo TLV too short (%d bytes)", optLen) + } + offset += 2 + optLen + } + return errors.New("Jumbo TLV not found") +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. 
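+//
+// A serialization sketch (payload is an assumed []byte; UDP and
+// IPProtocolUDP come from this package):
+//
+//	buf := gopacket.NewSerializeBuffer()
+//	opts := gopacket.SerializeOptions{FixLengths: true, ComputeChecksums: true}
+//	ip6 := &IPv6{Version: 6, HopLimit: 64, NextHeader: IPProtocolUDP,
+//		SrcIP: net.ParseIP("2001:db8::1"), DstIP: net.ParseIP("2001:db8::2")}
+//	udp := &UDP{SrcPort: 1024, DstPort: 53}
+//	_ = udp.SetNetworkLayerForChecksum(ip6)
+//	if err := gopacket.SerializeLayers(buf, opts, ip6, udp, gopacket.Payload(payload)); err != nil {
+//		// handle serialization failure
+//	}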
+func (ipv6 *IPv6) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + var jumbo bool + var err error + + payload := b.Bytes() + pLen := len(payload) + if pLen > ipv6MaxPayloadLength { + jumbo = true + if opts.FixLengths { + // We need to set the length later because the hop-by-hop header may + // not exist or else need padding, so pLen may yet change + addIPv6JumboOption(ipv6) + } else if ipv6.HopByHop == nil { + return fmt.Errorf("Cannot fit payload length of %d into IPv6 packet", pLen) + } else { + _, ok, err := getIPv6HopByHopJumboLength(ipv6.HopByHop) + if err != nil { + return err + } + if !ok { + return errors.New("Missing jumbo length hop-by-hop option") + } + } + } + + hbhAlreadySerialized := false + if ipv6.HopByHop != nil { + for _, l := range b.Layers() { + if l == LayerTypeIPv6HopByHop { + hbhAlreadySerialized = true + break + } + } + } + if ipv6.HopByHop != nil && !hbhAlreadySerialized { + if ipv6.NextHeader != IPProtocolIPv6HopByHop { + // Just fix it instead of throwing an error + ipv6.NextHeader = IPProtocolIPv6HopByHop + } + err = ipv6.HopByHop.SerializeTo(b, opts) + if err != nil { + return err + } + payload = b.Bytes() + pLen = len(payload) + if opts.FixLengths && jumbo { + err := setIPv6PayloadJumboLength(payload) + if err != nil { + return err + } + } + } + + if !jumbo && pLen > ipv6MaxPayloadLength { + return errors.New("Cannot fit payload into IPv6 header") + } + bytes, err := b.PrependBytes(40) + if err != nil { + return err + } + bytes[0] = (ipv6.Version << 4) | (ipv6.TrafficClass >> 4) + bytes[1] = (ipv6.TrafficClass << 4) | uint8(ipv6.FlowLabel>>16) + binary.BigEndian.PutUint16(bytes[2:], uint16(ipv6.FlowLabel)) + if opts.FixLengths { + if jumbo { + ipv6.Length = 0 + } else { + ipv6.Length = uint16(pLen) + } + } + binary.BigEndian.PutUint16(bytes[4:], ipv6.Length) + bytes[6] = byte(ipv6.NextHeader) + bytes[7] = byte(ipv6.HopLimit) + if err := ipv6.AddressTo16(); err != nil { + return err + } + copy(bytes[8:], ipv6.SrcIP) + copy(bytes[24:], ipv6.DstIP) + return nil +} + +// DecodeFromBytes implementation according to gopacket.DecodingLayer +func (ipv6 *IPv6) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 40 { + df.SetTruncated() + return fmt.Errorf("Invalid ip6 header. Length %d less than 40", len(data)) + } + ipv6.Version = uint8(data[0]) >> 4 + ipv6.TrafficClass = uint8((binary.BigEndian.Uint16(data[0:2]) >> 4) & 0x00FF) + ipv6.FlowLabel = binary.BigEndian.Uint32(data[0:4]) & 0x000FFFFF + ipv6.Length = binary.BigEndian.Uint16(data[4:6]) + ipv6.NextHeader = IPProtocol(data[6]) + ipv6.HopLimit = data[7] + ipv6.SrcIP = data[8:24] + ipv6.DstIP = data[24:40] + ipv6.HopByHop = nil + ipv6.BaseLayer = BaseLayer{data[:40], data[40:]} + + // We treat a HopByHop IPv6 option as part of the IPv6 packet, since its + // options are crucial for understanding what's actually happening per packet. 
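+	// Per RFC 2675 a jumbogram carries Payload Length 0 in the fixed header
+	// and the real length in the hop-by-hop Jumbo Payload option; the two
+	// fields are cross-checked below.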
+ if ipv6.NextHeader == IPProtocolIPv6HopByHop { + err := ipv6.hbh.DecodeFromBytes(ipv6.Payload, df) + if err != nil { + return err + } + ipv6.HopByHop = &ipv6.hbh + pEnd, jumbo, err := getIPv6HopByHopJumboLength(ipv6.HopByHop) + if err != nil { + return err + } + if jumbo && ipv6.Length == 0 { + pEnd := int(pEnd) + if pEnd > len(ipv6.Payload) { + df.SetTruncated() + pEnd = len(ipv6.Payload) + } + ipv6.Payload = ipv6.Payload[:pEnd] + return nil + } else if jumbo && ipv6.Length != 0 { + return errors.New("IPv6 has jumbo length and IPv6 length is not 0") + } else if !jumbo && ipv6.Length == 0 { + return errors.New("IPv6 length 0, but HopByHop header does not have jumbogram option") + } else { + ipv6.Payload = ipv6.Payload[ipv6.hbh.ActualLength:] + } + } + + if ipv6.Length == 0 { + return fmt.Errorf("IPv6 length 0, but next header is %v, not HopByHop", ipv6.NextHeader) + } + + pEnd := int(ipv6.Length) + if pEnd > len(ipv6.Payload) { + df.SetTruncated() + pEnd = len(ipv6.Payload) + } + ipv6.Payload = ipv6.Payload[:pEnd] + + return nil +} + +// CanDecode implementation according to gopacket.DecodingLayer +func (ipv6 *IPv6) CanDecode() gopacket.LayerClass { + return LayerTypeIPv6 +} + +// NextLayerType implementation according to gopacket.DecodingLayer +func (ipv6 *IPv6) NextLayerType() gopacket.LayerType { + if ipv6.HopByHop != nil { + return ipv6.HopByHop.NextHeader.LayerType() + } + return ipv6.NextHeader.LayerType() +} + +func decodeIPv6(data []byte, p gopacket.PacketBuilder) error { + ip6 := &IPv6{} + err := ip6.DecodeFromBytes(data, p) + p.AddLayer(ip6) + p.SetNetworkLayer(ip6) + if ip6.HopByHop != nil { + p.AddLayer(ip6.HopByHop) + } + if err != nil { + return err + } + return p.NextDecoder(ip6.NextLayerType()) +} + +type ipv6HeaderTLVOption struct { + OptionType, OptionLength uint8 + ActualLength int + OptionData []byte + OptionAlignment [2]uint8 // Xn+Y = [2]uint8{X, Y} +} + +func (h *ipv6HeaderTLVOption) serializeTo(data []byte, fixLengths bool, dryrun bool) int { + if fixLengths { + h.OptionLength = uint8(len(h.OptionData)) + } + length := int(h.OptionLength) + 2 + if !dryrun { + data[0] = h.OptionType + data[1] = h.OptionLength + copy(data[2:], h.OptionData) + } + return length +} + +func decodeIPv6HeaderTLVOption(data []byte, df gopacket.DecodeFeedback) (h *ipv6HeaderTLVOption, _ error) { + if len(data) < 2 { + df.SetTruncated() + return nil, errors.New("IPv6 header option too small") + } + h = &ipv6HeaderTLVOption{} + if data[0] == 0 { + h.ActualLength = 1 + return + } + h.OptionType = data[0] + h.OptionLength = data[1] + h.ActualLength = int(h.OptionLength) + 2 + if len(data) < h.ActualLength { + df.SetTruncated() + return nil, errors.New("IPv6 header TLV option too small") + } + h.OptionData = data[2:h.ActualLength] + return +} + +func serializeTLVOptionPadding(data []byte, padLength int) { + if padLength <= 0 { + return + } + if padLength == 1 { + data[0] = 0x0 + return + } + tlvLength := uint8(padLength) - 2 + data[0] = 0x1 + data[1] = tlvLength + if tlvLength != 0 { + for k := range data[2:] { + data[k+2] = 0x0 + } + } + return +} + +// If buf is 'nil' do a serialize dry run +func serializeIPv6HeaderTLVOptions(buf []byte, options []*ipv6HeaderTLVOption, fixLengths bool) int { + var l int + + dryrun := buf == nil + length := 2 + for _, opt := range options { + if fixLengths { + x := int(opt.OptionAlignment[0]) + y := int(opt.OptionAlignment[1]) + if x != 0 { + n := length / x + offset := x*n + y + if offset < length { + offset += x + } + if length != offset { + pad := offset - 
length + if !dryrun { + serializeTLVOptionPadding(buf[length-2:], pad) + } + length += pad + } + } + } + if dryrun { + l = opt.serializeTo(nil, fixLengths, true) + } else { + l = opt.serializeTo(buf[length-2:], fixLengths, false) + } + length += l + } + if fixLengths { + pad := length % 8 + if pad != 0 { + if !dryrun { + serializeTLVOptionPadding(buf[length-2:], pad) + } + length += pad + } + } + return length - 2 +} + +type ipv6ExtensionBase struct { + BaseLayer + NextHeader IPProtocol + HeaderLength uint8 + ActualLength int +} + +func decodeIPv6ExtensionBase(data []byte, df gopacket.DecodeFeedback) (i ipv6ExtensionBase, returnedErr error) { + if len(data) < 2 { + df.SetTruncated() + return ipv6ExtensionBase{}, fmt.Errorf("Invalid ip6-extension header. Length %d less than 2", len(data)) + } + i.NextHeader = IPProtocol(data[0]) + i.HeaderLength = data[1] + i.ActualLength = int(i.HeaderLength)*8 + 8 + if len(data) < i.ActualLength { + return ipv6ExtensionBase{}, fmt.Errorf("Invalid ip6-extension header. Length %d less than specified length %d", len(data), i.ActualLength) + } + i.Contents = data[:i.ActualLength] + i.Payload = data[i.ActualLength:] + return +} + +// IPv6ExtensionSkipper is a DecodingLayer which decodes and ignores v6 +// extensions. You can use it with a DecodingLayerParser to handle IPv6 stacks +// which may or may not have extensions. +type IPv6ExtensionSkipper struct { + NextHeader IPProtocol + BaseLayer +} + +// DecodeFromBytes implementation according to gopacket.DecodingLayer +func (i *IPv6ExtensionSkipper) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + extension, err := decodeIPv6ExtensionBase(data, df) + if err != nil { + return err + } + i.BaseLayer = BaseLayer{data[:extension.ActualLength], data[extension.ActualLength:]} + i.NextHeader = extension.NextHeader + return nil +} + +// CanDecode implementation according to gopacket.DecodingLayer +func (i *IPv6ExtensionSkipper) CanDecode() gopacket.LayerClass { + return LayerClassIPv6Extension +} + +// NextLayerType implementation according to gopacket.DecodingLayer +func (i *IPv6ExtensionSkipper) NextLayerType() gopacket.LayerType { + return i.NextHeader.LayerType() +} + +// IPv6HopByHopOption is a TLV option present in an IPv6 hop-by-hop extension. +type IPv6HopByHopOption ipv6HeaderTLVOption + +// IPv6HopByHop is the IPv6 hop-by-hop extension. +type IPv6HopByHop struct { + ipv6ExtensionBase + Options []*IPv6HopByHopOption +} + +// LayerType returns LayerTypeIPv6HopByHop. 
+func (i *IPv6HopByHop) LayerType() gopacket.LayerType { return LayerTypeIPv6HopByHop } + +// SerializeTo implementation according to gopacket.SerializableLayer +func (i *IPv6HopByHop) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + var bytes []byte + var err error + + o := make([]*ipv6HeaderTLVOption, 0, len(i.Options)) + for _, v := range i.Options { + o = append(o, (*ipv6HeaderTLVOption)(v)) + } + + l := serializeIPv6HeaderTLVOptions(nil, o, opts.FixLengths) + bytes, err = b.PrependBytes(l) + if err != nil { + return err + } + serializeIPv6HeaderTLVOptions(bytes, o, opts.FixLengths) + + length := len(bytes) + 2 + if length%8 != 0 { + return errors.New("IPv6HopByHop actual length must be multiple of 8") + } + bytes, err = b.PrependBytes(2) + if err != nil { + return err + } + bytes[0] = uint8(i.NextHeader) + if opts.FixLengths { + i.HeaderLength = uint8((length / 8) - 1) + } + bytes[1] = uint8(i.HeaderLength) + return nil +} + +// DecodeFromBytes implementation according to gopacket.DecodingLayer +func (i *IPv6HopByHop) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + var err error + i.ipv6ExtensionBase, err = decodeIPv6ExtensionBase(data, df) + if err != nil { + return err + } + i.Options = i.Options[:0] + offset := 2 + for offset < i.ActualLength { + opt, err := decodeIPv6HeaderTLVOption(data[offset:], df) + if err != nil { + return err + } + i.Options = append(i.Options, (*IPv6HopByHopOption)(opt)) + offset += opt.ActualLength + } + return nil +} + +func decodeIPv6HopByHop(data []byte, p gopacket.PacketBuilder) error { + i := &IPv6HopByHop{} + err := i.DecodeFromBytes(data, p) + p.AddLayer(i) + if err != nil { + return err + } + return p.NextDecoder(i.NextHeader) +} + +// SetJumboLength adds the IPv6HopByHopOptionJumbogram with the given length +func (o *IPv6HopByHopOption) SetJumboLength(len uint32) { + o.OptionType = IPv6HopByHopOptionJumbogram + o.OptionLength = 4 + o.ActualLength = 6 + if o.OptionData == nil { + o.OptionData = make([]byte, 4) + } + binary.BigEndian.PutUint32(o.OptionData, len) + o.OptionAlignment = [2]uint8{4, 2} +} + +// IPv6Routing is the IPv6 routing extension. +type IPv6Routing struct { + ipv6ExtensionBase + RoutingType uint8 + SegmentsLeft uint8 + // This segment is supposed to be zero according to RFC2460, the second set of + // 4 bytes in the extension. + Reserved []byte + // SourceRoutingIPs is the set of IPv6 addresses requested for source routing, + // set only if RoutingType == 0. + SourceRoutingIPs []net.IP +} + +// LayerType returns LayerTypeIPv6Routing. +func (i *IPv6Routing) LayerType() gopacket.LayerType { return LayerTypeIPv6Routing } + +func decodeIPv6Routing(data []byte, p gopacket.PacketBuilder) error { + base, err := decodeIPv6ExtensionBase(data, p) + if err != nil { + return err + } + i := &IPv6Routing{ + ipv6ExtensionBase: base, + RoutingType: data[2], + SegmentsLeft: data[3], + Reserved: data[4:8], + } + switch i.RoutingType { + case 0: // Source routing + if (i.ActualLength-8)%16 != 0 { + return fmt.Errorf("Invalid IPv6 source routing, length of type 0 packet %d", i.ActualLength) + } + for d := i.Contents[8:]; len(d) >= 16; d = d[16:] { + i.SourceRoutingIPs = append(i.SourceRoutingIPs, net.IP(d[:16])) + } + default: + return fmt.Errorf("Unknown IPv6 routing header type %d", i.RoutingType) + } + p.AddLayer(i) + return p.NextDecoder(i.NextHeader) +} + +// IPv6Fragment is the IPv6 fragment header, used for packet +// fragmentation/defragmentation. 
+type IPv6Fragment struct { + BaseLayer + NextHeader IPProtocol + // Reserved1 is bits [8-16), from least to most significant, 0-indexed + Reserved1 uint8 + FragmentOffset uint16 + // Reserved2 is bits [29-31), from least to most significant, 0-indexed + Reserved2 uint8 + MoreFragments bool + Identification uint32 +} + +// LayerType returns LayerTypeIPv6Fragment. +func (i *IPv6Fragment) LayerType() gopacket.LayerType { return LayerTypeIPv6Fragment } + +func decodeIPv6Fragment(data []byte, p gopacket.PacketBuilder) error { + if len(data) < 8 { + p.SetTruncated() + return fmt.Errorf("Invalid ip6-fragment header. Length %d less than 8", len(data)) + } + i := &IPv6Fragment{ + BaseLayer: BaseLayer{data[:8], data[8:]}, + NextHeader: IPProtocol(data[0]), + Reserved1: data[1], + FragmentOffset: binary.BigEndian.Uint16(data[2:4]) >> 3, + Reserved2: data[3] & 0x6 >> 1, + MoreFragments: data[3]&0x1 != 0, + Identification: binary.BigEndian.Uint32(data[4:8]), + } + p.AddLayer(i) + return p.NextDecoder(gopacket.DecodeFragment) +} + +// IPv6DestinationOption is a TLV option present in an IPv6 destination options extension. +type IPv6DestinationOption ipv6HeaderTLVOption + +// IPv6Destination is the IPv6 destination options header. +type IPv6Destination struct { + ipv6ExtensionBase + Options []*IPv6DestinationOption +} + +// LayerType returns LayerTypeIPv6Destination. +func (i *IPv6Destination) LayerType() gopacket.LayerType { return LayerTypeIPv6Destination } + +// DecodeFromBytes implementation according to gopacket.DecodingLayer +func (i *IPv6Destination) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + var err error + i.ipv6ExtensionBase, err = decodeIPv6ExtensionBase(data, df) + if err != nil { + return err + } + offset := 2 + for offset < i.ActualLength { + opt, err := decodeIPv6HeaderTLVOption(data[offset:], df) + if err != nil { + return err + } + i.Options = append(i.Options, (*IPv6DestinationOption)(opt)) + offset += opt.ActualLength + } + return nil +} + +func decodeIPv6Destination(data []byte, p gopacket.PacketBuilder) error { + i := &IPv6Destination{} + err := i.DecodeFromBytes(data, p) + p.AddLayer(i) + if err != nil { + return err + } + return p.NextDecoder(i.NextHeader) +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. 
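+//
+// The layout mirrors IPv6HopByHop.SerializeTo: a dry run sizes the TLV
+// options (padded to a multiple of 8 bytes), the options are prepended, and
+// finally the 2-byte Next Header / Header Length base is prepended.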
+func (i *IPv6Destination) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + var bytes []byte + var err error + + o := make([]*ipv6HeaderTLVOption, 0, len(i.Options)) + for _, v := range i.Options { + o = append(o, (*ipv6HeaderTLVOption)(v)) + } + + l := serializeIPv6HeaderTLVOptions(nil, o, opts.FixLengths) + bytes, err = b.PrependBytes(l) + if err != nil { + return err + } + serializeIPv6HeaderTLVOptions(bytes, o, opts.FixLengths) + + length := len(bytes) + 2 + if length%8 != 0 { + return errors.New("IPv6Destination actual length must be multiple of 8") + } + bytes, err = b.PrependBytes(2) + if err != nil { + return err + } + bytes[0] = uint8(i.NextHeader) + if opts.FixLengths { + i.HeaderLength = uint8((length / 8) - 1) + } + bytes[1] = uint8(i.HeaderLength) + return nil +} + +func checkIPv6Address(addr net.IP) error { + if len(addr) == net.IPv6len { + return nil + } + if len(addr) == net.IPv4len { + return errors.New("address is IPv4") + } + return fmt.Errorf("wrong length of %d bytes instead of %d", len(addr), net.IPv6len) +} + +// AddressTo16 ensures IPv6.SrcIP and IPv6.DstIP are actually IPv6 addresses (i.e. 16 byte addresses) +func (ipv6 *IPv6) AddressTo16() error { + if err := checkIPv6Address(ipv6.SrcIP); err != nil { + return fmt.Errorf("Invalid source IPv6 address (%s)", err) + } + if err := checkIPv6Address(ipv6.DstIP); err != nil { + return fmt.Errorf("Invalid destination IPv6 address (%s)", err) + } + return nil +} diff --git a/vendor/github.com/google/gopacket/layers/ipsec.go b/vendor/github.com/google/gopacket/layers/ipsec.go new file mode 100644 index 0000000000..12f31caf67 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/ipsec.go @@ -0,0 +1,77 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "errors" + "github.com/google/gopacket" +) + +// IPSecAH is the authentication header for IPv4/6 defined in +// http://tools.ietf.org/html/rfc2402 +type IPSecAH struct { + // While the auth header can be used for both IPv4 and v6, its format is that of + // an IPv6 extension (NextHeader, PayloadLength, etc...), so we use ipv6ExtensionBase + // to build it. + ipv6ExtensionBase + Reserved uint16 + SPI, Seq uint32 + AuthenticationData []byte +} + +// LayerType returns LayerTypeIPSecAH. 
+func (i *IPSecAH) LayerType() gopacket.LayerType { return LayerTypeIPSecAH } + +func decodeIPSecAH(data []byte, p gopacket.PacketBuilder) error { + if len(data) < 12 { + p.SetTruncated() + return errors.New("IPSec AH packet less than 12 bytes") + } + i := &IPSecAH{ + ipv6ExtensionBase: ipv6ExtensionBase{ + NextHeader: IPProtocol(data[0]), + HeaderLength: data[1], + }, + Reserved: binary.BigEndian.Uint16(data[2:4]), + SPI: binary.BigEndian.Uint32(data[4:8]), + Seq: binary.BigEndian.Uint32(data[8:12]), + } + i.ActualLength = (int(i.HeaderLength) + 2) * 4 + if len(data) < i.ActualLength { + p.SetTruncated() + return errors.New("Truncated AH packet < ActualLength") + } + i.AuthenticationData = data[12:i.ActualLength] + i.Contents = data[:i.ActualLength] + i.Payload = data[i.ActualLength:] + p.AddLayer(i) + return p.NextDecoder(i.NextHeader) +} + +// IPSecESP is the encapsulating security payload defined in +// http://tools.ietf.org/html/rfc2406 +type IPSecESP struct { + BaseLayer + SPI, Seq uint32 + // Encrypted contains the encrypted set of bytes sent in an ESP + Encrypted []byte +} + +// LayerType returns LayerTypeIPSecESP. +func (i *IPSecESP) LayerType() gopacket.LayerType { return LayerTypeIPSecESP } + +func decodeIPSecESP(data []byte, p gopacket.PacketBuilder) error { + i := &IPSecESP{ + BaseLayer: BaseLayer{data, nil}, + SPI: binary.BigEndian.Uint32(data[:4]), + Seq: binary.BigEndian.Uint32(data[4:8]), + Encrypted: data[8:], + } + p.AddLayer(i) + return nil +} diff --git a/vendor/github.com/google/gopacket/layers/layertypes.go b/vendor/github.com/google/gopacket/layers/layertypes.go new file mode 100644 index 0000000000..69d25ae802 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/layertypes.go @@ -0,0 +1,223 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. 
+ +package layers + +import ( + "github.com/google/gopacket" +) + +var ( + LayerTypeARP = gopacket.RegisterLayerType(10, gopacket.LayerTypeMetadata{Name: "ARP", Decoder: gopacket.DecodeFunc(decodeARP)}) + LayerTypeCiscoDiscovery = gopacket.RegisterLayerType(11, gopacket.LayerTypeMetadata{Name: "CiscoDiscovery", Decoder: gopacket.DecodeFunc(decodeCiscoDiscovery)}) + LayerTypeEthernetCTP = gopacket.RegisterLayerType(12, gopacket.LayerTypeMetadata{Name: "EthernetCTP", Decoder: gopacket.DecodeFunc(decodeEthernetCTP)}) + LayerTypeEthernetCTPForwardData = gopacket.RegisterLayerType(13, gopacket.LayerTypeMetadata{Name: "EthernetCTPForwardData", Decoder: nil}) + LayerTypeEthernetCTPReply = gopacket.RegisterLayerType(14, gopacket.LayerTypeMetadata{Name: "EthernetCTPReply", Decoder: nil}) + LayerTypeDot1Q = gopacket.RegisterLayerType(15, gopacket.LayerTypeMetadata{Name: "Dot1Q", Decoder: gopacket.DecodeFunc(decodeDot1Q)}) + LayerTypeEtherIP = gopacket.RegisterLayerType(16, gopacket.LayerTypeMetadata{Name: "EtherIP", Decoder: gopacket.DecodeFunc(decodeEtherIP)}) + LayerTypeEthernet = gopacket.RegisterLayerType(17, gopacket.LayerTypeMetadata{Name: "Ethernet", Decoder: gopacket.DecodeFunc(decodeEthernet)}) + LayerTypeGRE = gopacket.RegisterLayerType(18, gopacket.LayerTypeMetadata{Name: "GRE", Decoder: gopacket.DecodeFunc(decodeGRE)}) + LayerTypeICMPv4 = gopacket.RegisterLayerType(19, gopacket.LayerTypeMetadata{Name: "ICMPv4", Decoder: gopacket.DecodeFunc(decodeICMPv4)}) + LayerTypeIPv4 = gopacket.RegisterLayerType(20, gopacket.LayerTypeMetadata{Name: "IPv4", Decoder: gopacket.DecodeFunc(decodeIPv4)}) + LayerTypeIPv6 = gopacket.RegisterLayerType(21, gopacket.LayerTypeMetadata{Name: "IPv6", Decoder: gopacket.DecodeFunc(decodeIPv6)}) + LayerTypeLLC = gopacket.RegisterLayerType(22, gopacket.LayerTypeMetadata{Name: "LLC", Decoder: gopacket.DecodeFunc(decodeLLC)}) + LayerTypeSNAP = gopacket.RegisterLayerType(23, gopacket.LayerTypeMetadata{Name: "SNAP", Decoder: gopacket.DecodeFunc(decodeSNAP)}) + LayerTypeMPLS = gopacket.RegisterLayerType(24, gopacket.LayerTypeMetadata{Name: "MPLS", Decoder: gopacket.DecodeFunc(decodeMPLS)}) + LayerTypePPP = gopacket.RegisterLayerType(25, gopacket.LayerTypeMetadata{Name: "PPP", Decoder: gopacket.DecodeFunc(decodePPP)}) + LayerTypePPPoE = gopacket.RegisterLayerType(26, gopacket.LayerTypeMetadata{Name: "PPPoE", Decoder: gopacket.DecodeFunc(decodePPPoE)}) + LayerTypeRUDP = gopacket.RegisterLayerType(27, gopacket.LayerTypeMetadata{Name: "RUDP", Decoder: gopacket.DecodeFunc(decodeRUDP)}) + LayerTypeSCTP = gopacket.RegisterLayerType(28, gopacket.LayerTypeMetadata{Name: "SCTP", Decoder: gopacket.DecodeFunc(decodeSCTP)}) + LayerTypeSCTPUnknownChunkType = gopacket.RegisterLayerType(29, gopacket.LayerTypeMetadata{Name: "SCTPUnknownChunkType", Decoder: nil}) + LayerTypeSCTPData = gopacket.RegisterLayerType(30, gopacket.LayerTypeMetadata{Name: "SCTPData", Decoder: nil}) + LayerTypeSCTPInit = gopacket.RegisterLayerType(31, gopacket.LayerTypeMetadata{Name: "SCTPInit", Decoder: nil}) + LayerTypeSCTPSack = gopacket.RegisterLayerType(32, gopacket.LayerTypeMetadata{Name: "SCTPSack", Decoder: nil}) + LayerTypeSCTPHeartbeat = gopacket.RegisterLayerType(33, gopacket.LayerTypeMetadata{Name: "SCTPHeartbeat", Decoder: nil}) + LayerTypeSCTPError = gopacket.RegisterLayerType(34, gopacket.LayerTypeMetadata{Name: "SCTPError", Decoder: nil}) + LayerTypeSCTPShutdown = gopacket.RegisterLayerType(35, gopacket.LayerTypeMetadata{Name: "SCTPShutdown", Decoder: nil}) + LayerTypeSCTPShutdownAck = 
gopacket.RegisterLayerType(36, gopacket.LayerTypeMetadata{Name: "SCTPShutdownAck", Decoder: nil}) + LayerTypeSCTPCookieEcho = gopacket.RegisterLayerType(37, gopacket.LayerTypeMetadata{Name: "SCTPCookieEcho", Decoder: nil}) + LayerTypeSCTPEmptyLayer = gopacket.RegisterLayerType(38, gopacket.LayerTypeMetadata{Name: "SCTPEmptyLayer", Decoder: nil}) + LayerTypeSCTPInitAck = gopacket.RegisterLayerType(39, gopacket.LayerTypeMetadata{Name: "SCTPInitAck", Decoder: nil}) + LayerTypeSCTPHeartbeatAck = gopacket.RegisterLayerType(40, gopacket.LayerTypeMetadata{Name: "SCTPHeartbeatAck", Decoder: nil}) + LayerTypeSCTPAbort = gopacket.RegisterLayerType(41, gopacket.LayerTypeMetadata{Name: "SCTPAbort", Decoder: nil}) + LayerTypeSCTPShutdownComplete = gopacket.RegisterLayerType(42, gopacket.LayerTypeMetadata{Name: "SCTPShutdownComplete", Decoder: nil}) + LayerTypeSCTPCookieAck = gopacket.RegisterLayerType(43, gopacket.LayerTypeMetadata{Name: "SCTPCookieAck", Decoder: nil}) + LayerTypeTCP = gopacket.RegisterLayerType(44, gopacket.LayerTypeMetadata{Name: "TCP", Decoder: gopacket.DecodeFunc(decodeTCP)}) + LayerTypeUDP = gopacket.RegisterLayerType(45, gopacket.LayerTypeMetadata{Name: "UDP", Decoder: gopacket.DecodeFunc(decodeUDP)}) + LayerTypeIPv6HopByHop = gopacket.RegisterLayerType(46, gopacket.LayerTypeMetadata{Name: "IPv6HopByHop", Decoder: gopacket.DecodeFunc(decodeIPv6HopByHop)}) + LayerTypeIPv6Routing = gopacket.RegisterLayerType(47, gopacket.LayerTypeMetadata{Name: "IPv6Routing", Decoder: gopacket.DecodeFunc(decodeIPv6Routing)}) + LayerTypeIPv6Fragment = gopacket.RegisterLayerType(48, gopacket.LayerTypeMetadata{Name: "IPv6Fragment", Decoder: gopacket.DecodeFunc(decodeIPv6Fragment)}) + LayerTypeIPv6Destination = gopacket.RegisterLayerType(49, gopacket.LayerTypeMetadata{Name: "IPv6Destination", Decoder: gopacket.DecodeFunc(decodeIPv6Destination)}) + LayerTypeIPSecAH = gopacket.RegisterLayerType(50, gopacket.LayerTypeMetadata{Name: "IPSecAH", Decoder: gopacket.DecodeFunc(decodeIPSecAH)}) + LayerTypeIPSecESP = gopacket.RegisterLayerType(51, gopacket.LayerTypeMetadata{Name: "IPSecESP", Decoder: gopacket.DecodeFunc(decodeIPSecESP)}) + LayerTypeUDPLite = gopacket.RegisterLayerType(52, gopacket.LayerTypeMetadata{Name: "UDPLite", Decoder: gopacket.DecodeFunc(decodeUDPLite)}) + LayerTypeFDDI = gopacket.RegisterLayerType(53, gopacket.LayerTypeMetadata{Name: "FDDI", Decoder: gopacket.DecodeFunc(decodeFDDI)}) + LayerTypeLoopback = gopacket.RegisterLayerType(54, gopacket.LayerTypeMetadata{Name: "Loopback", Decoder: gopacket.DecodeFunc(decodeLoopback)}) + LayerTypeEAP = gopacket.RegisterLayerType(55, gopacket.LayerTypeMetadata{Name: "EAP", Decoder: gopacket.DecodeFunc(decodeEAP)}) + LayerTypeEAPOL = gopacket.RegisterLayerType(56, gopacket.LayerTypeMetadata{Name: "EAPOL", Decoder: gopacket.DecodeFunc(decodeEAPOL)}) + LayerTypeICMPv6 = gopacket.RegisterLayerType(57, gopacket.LayerTypeMetadata{Name: "ICMPv6", Decoder: gopacket.DecodeFunc(decodeICMPv6)}) + LayerTypeLinkLayerDiscovery = gopacket.RegisterLayerType(58, gopacket.LayerTypeMetadata{Name: "LinkLayerDiscovery", Decoder: gopacket.DecodeFunc(decodeLinkLayerDiscovery)}) + LayerTypeCiscoDiscoveryInfo = gopacket.RegisterLayerType(59, gopacket.LayerTypeMetadata{Name: "CiscoDiscoveryInfo", Decoder: gopacket.DecodeFunc(decodeCiscoDiscoveryInfo)}) + LayerTypeLinkLayerDiscoveryInfo = gopacket.RegisterLayerType(60, gopacket.LayerTypeMetadata{Name: "LinkLayerDiscoveryInfo", Decoder: nil}) + LayerTypeNortelDiscovery = gopacket.RegisterLayerType(61, 
gopacket.LayerTypeMetadata{Name: "NortelDiscovery", Decoder: gopacket.DecodeFunc(decodeNortelDiscovery)}) + LayerTypeIGMP = gopacket.RegisterLayerType(62, gopacket.LayerTypeMetadata{Name: "IGMP", Decoder: gopacket.DecodeFunc(decodeIGMP)}) + LayerTypePFLog = gopacket.RegisterLayerType(63, gopacket.LayerTypeMetadata{Name: "PFLog", Decoder: gopacket.DecodeFunc(decodePFLog)}) + LayerTypeRadioTap = gopacket.RegisterLayerType(64, gopacket.LayerTypeMetadata{Name: "RadioTap", Decoder: gopacket.DecodeFunc(decodeRadioTap)}) + LayerTypeDot11 = gopacket.RegisterLayerType(65, gopacket.LayerTypeMetadata{Name: "Dot11", Decoder: gopacket.DecodeFunc(decodeDot11)}) + LayerTypeDot11Ctrl = gopacket.RegisterLayerType(66, gopacket.LayerTypeMetadata{Name: "Dot11Ctrl", Decoder: gopacket.DecodeFunc(decodeDot11Ctrl)}) + LayerTypeDot11Data = gopacket.RegisterLayerType(67, gopacket.LayerTypeMetadata{Name: "Dot11Data", Decoder: gopacket.DecodeFunc(decodeDot11Data)}) + LayerTypeDot11DataCFAck = gopacket.RegisterLayerType(68, gopacket.LayerTypeMetadata{Name: "Dot11DataCFAck", Decoder: gopacket.DecodeFunc(decodeDot11DataCFAck)}) + LayerTypeDot11DataCFPoll = gopacket.RegisterLayerType(69, gopacket.LayerTypeMetadata{Name: "Dot11DataCFPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataCFPoll)}) + LayerTypeDot11DataCFAckPoll = gopacket.RegisterLayerType(70, gopacket.LayerTypeMetadata{Name: "Dot11DataCFAckPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataCFAckPoll)}) + LayerTypeDot11DataNull = gopacket.RegisterLayerType(71, gopacket.LayerTypeMetadata{Name: "Dot11DataNull", Decoder: gopacket.DecodeFunc(decodeDot11DataNull)}) + LayerTypeDot11DataCFAckNoData = gopacket.RegisterLayerType(72, gopacket.LayerTypeMetadata{Name: "Dot11DataCFAck", Decoder: gopacket.DecodeFunc(decodeDot11DataCFAck)}) + LayerTypeDot11DataCFPollNoData = gopacket.RegisterLayerType(73, gopacket.LayerTypeMetadata{Name: "Dot11DataCFPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataCFPoll)}) + LayerTypeDot11DataCFAckPollNoData = gopacket.RegisterLayerType(74, gopacket.LayerTypeMetadata{Name: "Dot11DataCFAckPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataCFAckPoll)}) + LayerTypeDot11DataQOSData = gopacket.RegisterLayerType(75, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSData", Decoder: gopacket.DecodeFunc(decodeDot11DataQOSData)}) + LayerTypeDot11DataQOSDataCFAck = gopacket.RegisterLayerType(76, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSDataCFAck", Decoder: gopacket.DecodeFunc(decodeDot11DataQOSDataCFAck)}) + LayerTypeDot11DataQOSDataCFPoll = gopacket.RegisterLayerType(77, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSDataCFPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataQOSDataCFPoll)}) + LayerTypeDot11DataQOSDataCFAckPoll = gopacket.RegisterLayerType(78, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSDataCFAckPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataQOSDataCFAckPoll)}) + LayerTypeDot11DataQOSNull = gopacket.RegisterLayerType(79, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSNull", Decoder: gopacket.DecodeFunc(decodeDot11DataQOSNull)}) + LayerTypeDot11DataQOSCFPollNoData = gopacket.RegisterLayerType(80, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSCFPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataQOSCFPollNoData)}) + LayerTypeDot11DataQOSCFAckPollNoData = gopacket.RegisterLayerType(81, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSCFAckPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataQOSCFAckPollNoData)}) + LayerTypeDot11InformationElement = gopacket.RegisterLayerType(82, gopacket.LayerTypeMetadata{Name: "Dot11InformationElement", 
Decoder: gopacket.DecodeFunc(decodeDot11InformationElement)}) + LayerTypeDot11CtrlCTS = gopacket.RegisterLayerType(83, gopacket.LayerTypeMetadata{Name: "Dot11CtrlCTS", Decoder: gopacket.DecodeFunc(decodeDot11CtrlCTS)}) + LayerTypeDot11CtrlRTS = gopacket.RegisterLayerType(84, gopacket.LayerTypeMetadata{Name: "Dot11CtrlRTS", Decoder: gopacket.DecodeFunc(decodeDot11CtrlRTS)}) + LayerTypeDot11CtrlBlockAckReq = gopacket.RegisterLayerType(85, gopacket.LayerTypeMetadata{Name: "Dot11CtrlBlockAckReq", Decoder: gopacket.DecodeFunc(decodeDot11CtrlBlockAckReq)}) + LayerTypeDot11CtrlBlockAck = gopacket.RegisterLayerType(86, gopacket.LayerTypeMetadata{Name: "Dot11CtrlBlockAck", Decoder: gopacket.DecodeFunc(decodeDot11CtrlBlockAck)}) + LayerTypeDot11CtrlPowersavePoll = gopacket.RegisterLayerType(87, gopacket.LayerTypeMetadata{Name: "Dot11CtrlPowersavePoll", Decoder: gopacket.DecodeFunc(decodeDot11CtrlPowersavePoll)}) + LayerTypeDot11CtrlAck = gopacket.RegisterLayerType(88, gopacket.LayerTypeMetadata{Name: "Dot11CtrlAck", Decoder: gopacket.DecodeFunc(decodeDot11CtrlAck)}) + LayerTypeDot11CtrlCFEnd = gopacket.RegisterLayerType(89, gopacket.LayerTypeMetadata{Name: "Dot11CtrlCFEnd", Decoder: gopacket.DecodeFunc(decodeDot11CtrlCFEnd)}) + LayerTypeDot11CtrlCFEndAck = gopacket.RegisterLayerType(90, gopacket.LayerTypeMetadata{Name: "Dot11CtrlCFEndAck", Decoder: gopacket.DecodeFunc(decodeDot11CtrlCFEndAck)}) + LayerTypeDot11MgmtAssociationReq = gopacket.RegisterLayerType(91, gopacket.LayerTypeMetadata{Name: "Dot11MgmtAssociationReq", Decoder: gopacket.DecodeFunc(decodeDot11MgmtAssociationReq)}) + LayerTypeDot11MgmtAssociationResp = gopacket.RegisterLayerType(92, gopacket.LayerTypeMetadata{Name: "Dot11MgmtAssociationResp", Decoder: gopacket.DecodeFunc(decodeDot11MgmtAssociationResp)}) + LayerTypeDot11MgmtReassociationReq = gopacket.RegisterLayerType(93, gopacket.LayerTypeMetadata{Name: "Dot11MgmtReassociationReq", Decoder: gopacket.DecodeFunc(decodeDot11MgmtReassociationReq)}) + LayerTypeDot11MgmtReassociationResp = gopacket.RegisterLayerType(94, gopacket.LayerTypeMetadata{Name: "Dot11MgmtReassociationResp", Decoder: gopacket.DecodeFunc(decodeDot11MgmtReassociationResp)}) + LayerTypeDot11MgmtProbeReq = gopacket.RegisterLayerType(95, gopacket.LayerTypeMetadata{Name: "Dot11MgmtProbeReq", Decoder: gopacket.DecodeFunc(decodeDot11MgmtProbeReq)}) + LayerTypeDot11MgmtProbeResp = gopacket.RegisterLayerType(96, gopacket.LayerTypeMetadata{Name: "Dot11MgmtProbeResp", Decoder: gopacket.DecodeFunc(decodeDot11MgmtProbeResp)}) + LayerTypeDot11MgmtMeasurementPilot = gopacket.RegisterLayerType(97, gopacket.LayerTypeMetadata{Name: "Dot11MgmtMeasurementPilot", Decoder: gopacket.DecodeFunc(decodeDot11MgmtMeasurementPilot)}) + LayerTypeDot11MgmtBeacon = gopacket.RegisterLayerType(98, gopacket.LayerTypeMetadata{Name: "Dot11MgmtBeacon", Decoder: gopacket.DecodeFunc(decodeDot11MgmtBeacon)}) + LayerTypeDot11MgmtATIM = gopacket.RegisterLayerType(99, gopacket.LayerTypeMetadata{Name: "Dot11MgmtATIM", Decoder: gopacket.DecodeFunc(decodeDot11MgmtATIM)}) + LayerTypeDot11MgmtDisassociation = gopacket.RegisterLayerType(100, gopacket.LayerTypeMetadata{Name: "Dot11MgmtDisassociation", Decoder: gopacket.DecodeFunc(decodeDot11MgmtDisassociation)}) + LayerTypeDot11MgmtAuthentication = gopacket.RegisterLayerType(101, gopacket.LayerTypeMetadata{Name: "Dot11MgmtAuthentication", Decoder: gopacket.DecodeFunc(decodeDot11MgmtAuthentication)}) + LayerTypeDot11MgmtDeauthentication = gopacket.RegisterLayerType(102, gopacket.LayerTypeMetadata{Name: 
"Dot11MgmtDeauthentication", Decoder: gopacket.DecodeFunc(decodeDot11MgmtDeauthentication)}) + LayerTypeDot11MgmtAction = gopacket.RegisterLayerType(103, gopacket.LayerTypeMetadata{Name: "Dot11MgmtAction", Decoder: gopacket.DecodeFunc(decodeDot11MgmtAction)}) + LayerTypeDot11MgmtActionNoAck = gopacket.RegisterLayerType(104, gopacket.LayerTypeMetadata{Name: "Dot11MgmtActionNoAck", Decoder: gopacket.DecodeFunc(decodeDot11MgmtActionNoAck)}) + LayerTypeDot11MgmtArubaWLAN = gopacket.RegisterLayerType(105, gopacket.LayerTypeMetadata{Name: "Dot11MgmtArubaWLAN", Decoder: gopacket.DecodeFunc(decodeDot11MgmtArubaWLAN)}) + LayerTypeDot11WEP = gopacket.RegisterLayerType(106, gopacket.LayerTypeMetadata{Name: "Dot11WEP", Decoder: gopacket.DecodeFunc(decodeDot11WEP)}) + LayerTypeDNS = gopacket.RegisterLayerType(107, gopacket.LayerTypeMetadata{Name: "DNS", Decoder: gopacket.DecodeFunc(decodeDNS)}) + LayerTypeUSB = gopacket.RegisterLayerType(108, gopacket.LayerTypeMetadata{Name: "USB", Decoder: gopacket.DecodeFunc(decodeUSB)}) + LayerTypeUSBRequestBlockSetup = gopacket.RegisterLayerType(109, gopacket.LayerTypeMetadata{Name: "USBRequestBlockSetup", Decoder: gopacket.DecodeFunc(decodeUSBRequestBlockSetup)}) + LayerTypeUSBControl = gopacket.RegisterLayerType(110, gopacket.LayerTypeMetadata{Name: "USBControl", Decoder: gopacket.DecodeFunc(decodeUSBControl)}) + LayerTypeUSBInterrupt = gopacket.RegisterLayerType(111, gopacket.LayerTypeMetadata{Name: "USBInterrupt", Decoder: gopacket.DecodeFunc(decodeUSBInterrupt)}) + LayerTypeUSBBulk = gopacket.RegisterLayerType(112, gopacket.LayerTypeMetadata{Name: "USBBulk", Decoder: gopacket.DecodeFunc(decodeUSBBulk)}) + LayerTypeLinuxSLL = gopacket.RegisterLayerType(113, gopacket.LayerTypeMetadata{Name: "Linux SLL", Decoder: gopacket.DecodeFunc(decodeLinuxSLL)}) + LayerTypeSFlow = gopacket.RegisterLayerType(114, gopacket.LayerTypeMetadata{Name: "SFlow", Decoder: gopacket.DecodeFunc(decodeSFlow)}) + LayerTypePrismHeader = gopacket.RegisterLayerType(115, gopacket.LayerTypeMetadata{Name: "Prism monitor mode header", Decoder: gopacket.DecodeFunc(decodePrismHeader)}) + LayerTypeVXLAN = gopacket.RegisterLayerType(116, gopacket.LayerTypeMetadata{Name: "VXLAN", Decoder: gopacket.DecodeFunc(decodeVXLAN)}) + LayerTypeNTP = gopacket.RegisterLayerType(117, gopacket.LayerTypeMetadata{Name: "NTP", Decoder: gopacket.DecodeFunc(decodeNTP)}) + LayerTypeDHCPv4 = gopacket.RegisterLayerType(118, gopacket.LayerTypeMetadata{Name: "DHCPv4", Decoder: gopacket.DecodeFunc(decodeDHCPv4)}) + LayerTypeVRRP = gopacket.RegisterLayerType(119, gopacket.LayerTypeMetadata{Name: "VRRP", Decoder: gopacket.DecodeFunc(decodeVRRP)}) + LayerTypeGeneve = gopacket.RegisterLayerType(120, gopacket.LayerTypeMetadata{Name: "Geneve", Decoder: gopacket.DecodeFunc(decodeGeneve)}) + LayerTypeSTP = gopacket.RegisterLayerType(121, gopacket.LayerTypeMetadata{Name: "STP", Decoder: gopacket.DecodeFunc(decodeSTP)}) + LayerTypeBFD = gopacket.RegisterLayerType(122, gopacket.LayerTypeMetadata{Name: "BFD", Decoder: gopacket.DecodeFunc(decodeBFD)}) + LayerTypeOSPF = gopacket.RegisterLayerType(123, gopacket.LayerTypeMetadata{Name: "OSPF", Decoder: gopacket.DecodeFunc(decodeOSPF)}) + LayerTypeICMPv6RouterSolicitation = gopacket.RegisterLayerType(124, gopacket.LayerTypeMetadata{Name: "ICMPv6RouterSolicitation", Decoder: gopacket.DecodeFunc(decodeICMPv6RouterSolicitation)}) + LayerTypeICMPv6RouterAdvertisement = gopacket.RegisterLayerType(125, gopacket.LayerTypeMetadata{Name: "ICMPv6RouterAdvertisement", Decoder: 
gopacket.DecodeFunc(decodeICMPv6RouterAdvertisement)}) + LayerTypeICMPv6NeighborSolicitation = gopacket.RegisterLayerType(126, gopacket.LayerTypeMetadata{Name: "ICMPv6NeighborSolicitation", Decoder: gopacket.DecodeFunc(decodeICMPv6NeighborSolicitation)}) + LayerTypeICMPv6NeighborAdvertisement = gopacket.RegisterLayerType(127, gopacket.LayerTypeMetadata{Name: "ICMPv6NeighborAdvertisement", Decoder: gopacket.DecodeFunc(decodeICMPv6NeighborAdvertisement)}) + LayerTypeICMPv6Redirect = gopacket.RegisterLayerType(128, gopacket.LayerTypeMetadata{Name: "ICMPv6Redirect", Decoder: gopacket.DecodeFunc(decodeICMPv6Redirect)}) + LayerTypeGTPv1U = gopacket.RegisterLayerType(129, gopacket.LayerTypeMetadata{Name: "GTPv1U", Decoder: gopacket.DecodeFunc(decodeGTPv1u)}) + LayerTypeEAPOLKey = gopacket.RegisterLayerType(130, gopacket.LayerTypeMetadata{Name: "EAPOLKey", Decoder: gopacket.DecodeFunc(decodeEAPOLKey)}) + LayerTypeLCM = gopacket.RegisterLayerType(131, gopacket.LayerTypeMetadata{Name: "LCM", Decoder: gopacket.DecodeFunc(decodeLCM)}) + LayerTypeICMPv6Echo = gopacket.RegisterLayerType(132, gopacket.LayerTypeMetadata{Name: "ICMPv6Echo", Decoder: gopacket.DecodeFunc(decodeICMPv6Echo)}) + LayerTypeSIP = gopacket.RegisterLayerType(133, gopacket.LayerTypeMetadata{Name: "SIP", Decoder: gopacket.DecodeFunc(decodeSIP)}) + LayerTypeDHCPv6 = gopacket.RegisterLayerType(134, gopacket.LayerTypeMetadata{Name: "DHCPv6", Decoder: gopacket.DecodeFunc(decodeDHCPv6)}) + LayerTypeMLDv1MulticastListenerReport = gopacket.RegisterLayerType(135, gopacket.LayerTypeMetadata{Name: "MLDv1MulticastListenerReport", Decoder: gopacket.DecodeFunc(decodeMLDv1MulticastListenerReport)}) + LayerTypeMLDv1MulticastListenerDone = gopacket.RegisterLayerType(136, gopacket.LayerTypeMetadata{Name: "MLDv1MulticastListenerDone", Decoder: gopacket.DecodeFunc(decodeMLDv1MulticastListenerDone)}) + LayerTypeMLDv1MulticastListenerQuery = gopacket.RegisterLayerType(137, gopacket.LayerTypeMetadata{Name: "MLDv1MulticastListenerQuery", Decoder: gopacket.DecodeFunc(decodeMLDv1MulticastListenerQuery)}) + LayerTypeMLDv2MulticastListenerReport = gopacket.RegisterLayerType(138, gopacket.LayerTypeMetadata{Name: "MLDv2MulticastListenerReport", Decoder: gopacket.DecodeFunc(decodeMLDv2MulticastListenerReport)}) + LayerTypeMLDv2MulticastListenerQuery = gopacket.RegisterLayerType(139, gopacket.LayerTypeMetadata{Name: "MLDv2MulticastListenerQuery", Decoder: gopacket.DecodeFunc(decodeMLDv2MulticastListenerQuery)}) + LayerTypeTLS = gopacket.RegisterLayerType(140, gopacket.LayerTypeMetadata{Name: "TLS", Decoder: gopacket.DecodeFunc(decodeTLS)}) + LayerTypeModbusTCP = gopacket.RegisterLayerType(141, gopacket.LayerTypeMetadata{Name: "ModbusTCP", Decoder: gopacket.DecodeFunc(decodeModbusTCP)}) + LayerTypeRMCP = gopacket.RegisterLayerType(142, gopacket.LayerTypeMetadata{Name: "RMCP", Decoder: gopacket.DecodeFunc(decodeRMCP)}) + LayerTypeASF = gopacket.RegisterLayerType(143, gopacket.LayerTypeMetadata{Name: "ASF", Decoder: gopacket.DecodeFunc(decodeASF)}) + LayerTypeASFPresencePong = gopacket.RegisterLayerType(144, gopacket.LayerTypeMetadata{Name: "ASFPresencePong", Decoder: gopacket.DecodeFunc(decodeASFPresencePong)}) + LayerTypeERSPANII = gopacket.RegisterLayerType(145, gopacket.LayerTypeMetadata{Name: "ERSPAN Type II", Decoder: gopacket.DecodeFunc(decodeERSPANII)}) + LayerTypeRADIUS = gopacket.RegisterLayerType(146, gopacket.LayerTypeMetadata{Name: "RADIUS", Decoder: gopacket.DecodeFunc(decodeRADIUS)}) +) + +var ( + // LayerClassIPNetwork contains TCP/IP network layer 
types. + LayerClassIPNetwork = gopacket.NewLayerClass([]gopacket.LayerType{ + LayerTypeIPv4, + LayerTypeIPv6, + }) + // LayerClassIPTransport contains TCP/IP transport layer types. + LayerClassIPTransport = gopacket.NewLayerClass([]gopacket.LayerType{ + LayerTypeTCP, + LayerTypeUDP, + LayerTypeSCTP, + }) + // LayerClassIPControl contains TCP/IP control protocols. + LayerClassIPControl = gopacket.NewLayerClass([]gopacket.LayerType{ + LayerTypeICMPv4, + LayerTypeICMPv6, + }) + // LayerClassSCTPChunk contains SCTP chunk types (not the top-level SCTP + // layer). + LayerClassSCTPChunk = gopacket.NewLayerClass([]gopacket.LayerType{ + LayerTypeSCTPUnknownChunkType, + LayerTypeSCTPData, + LayerTypeSCTPInit, + LayerTypeSCTPSack, + LayerTypeSCTPHeartbeat, + LayerTypeSCTPError, + LayerTypeSCTPShutdown, + LayerTypeSCTPShutdownAck, + LayerTypeSCTPCookieEcho, + LayerTypeSCTPEmptyLayer, + LayerTypeSCTPInitAck, + LayerTypeSCTPHeartbeatAck, + LayerTypeSCTPAbort, + LayerTypeSCTPShutdownComplete, + LayerTypeSCTPCookieAck, + }) + // LayerClassIPv6Extension contains IPv6 extension headers. + LayerClassIPv6Extension = gopacket.NewLayerClass([]gopacket.LayerType{ + LayerTypeIPv6HopByHop, + LayerTypeIPv6Routing, + LayerTypeIPv6Fragment, + LayerTypeIPv6Destination, + }) + LayerClassIPSec = gopacket.NewLayerClass([]gopacket.LayerType{ + LayerTypeIPSecAH, + LayerTypeIPSecESP, + }) + // LayerClassICMPv6NDP contains ICMPv6 neighbor discovery protocol + // messages. + LayerClassICMPv6NDP = gopacket.NewLayerClass([]gopacket.LayerType{ + LayerTypeICMPv6RouterSolicitation, + LayerTypeICMPv6RouterAdvertisement, + LayerTypeICMPv6NeighborSolicitation, + LayerTypeICMPv6NeighborAdvertisement, + LayerTypeICMPv6Redirect, + }) + // LayerClassMLDv1 contains multicast listener discovery protocol + LayerClassMLDv1 = gopacket.NewLayerClass([]gopacket.LayerType{ + LayerTypeMLDv1MulticastListenerQuery, + LayerTypeMLDv1MulticastListenerReport, + LayerTypeMLDv1MulticastListenerDone, + }) + // LayerClassMLDv2 contains multicast listener discovery protocol v2 + LayerClassMLDv2 = gopacket.NewLayerClass([]gopacket.LayerType{ + LayerTypeMLDv1MulticastListenerReport, + LayerTypeMLDv1MulticastListenerDone, + LayerTypeMLDv2MulticastListenerReport, + LayerTypeMLDv1MulticastListenerQuery, + LayerTypeMLDv2MulticastListenerQuery, + }) +) diff --git a/vendor/github.com/google/gopacket/layers/lcm.go b/vendor/github.com/google/gopacket/layers/lcm.go new file mode 100644 index 0000000000..58a4b82890 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/lcm.go @@ -0,0 +1,218 @@ +// Copyright 2018 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "errors" + "fmt" + + "github.com/google/gopacket" +) + +const ( + // LCMShortHeaderMagic is the LCM small message header magic number + LCMShortHeaderMagic uint32 = 0x4c433032 + // LCMFragmentedHeaderMagic is the LCM fragmented message header magic number + LCMFragmentedHeaderMagic uint32 = 0x4c433033 +) + +// LCM (Lightweight Communications and Marshalling) is a set of libraries and +// tools for message passing and data marshalling, targeted at real-time systems +// where high-bandwidth and low latency are critical. It provides a +// publish/subscribe message passing model and automatic +// marshalling/unmarshalling code generation with bindings for applications in a +// variety of programming languages. 
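+//
+// On the wire the two header variants are told apart by their magic
+// numbers: 0x4c433032 ("LC02") for short messages and 0x4c433033 ("LC03")
+// for fragmented messages (see LCMShortHeaderMagic and
+// LCMFragmentedHeaderMagic above).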
+// +// References +// https://lcm-proj.github.io/ +// https://github.com/lcm-proj/lcm +type LCM struct { + // Common (short & fragmented header) fields + Magic uint32 + SequenceNumber uint32 + // Fragmented header only fields + PayloadSize uint32 + FragmentOffset uint32 + FragmentNumber uint16 + TotalFragments uint16 + // Common field + ChannelName string + // Gopacket helper fields + Fragmented bool + fingerprint LCMFingerprint + contents []byte + payload []byte +} + +// LCMFingerprint is the type of a LCM fingerprint. +type LCMFingerprint uint64 + +var ( + // lcmLayerTypes contains a map of all LCM fingerprints that we support and + // their LayerType + lcmLayerTypes = map[LCMFingerprint]gopacket.LayerType{} + layerTypeIndex = 1001 +) + +// RegisterLCMLayerType allows users to register decoders for the underlying +// LCM payload. This is done based on the fingerprint that every LCM message +// contains and which identifies it uniquely. If num is not the zero value it +// will be used when registering with RegisterLayerType towards gopacket, +// otherwise an incremental value starting from 1001 will be used. +func RegisterLCMLayerType(num int, name string, fingerprint LCMFingerprint, + decoder gopacket.Decoder) gopacket.LayerType { + metadata := gopacket.LayerTypeMetadata{Name: name, Decoder: decoder} + + if num == 0 { + num = layerTypeIndex + layerTypeIndex++ + } + + lcmLayerTypes[fingerprint] = gopacket.RegisterLayerType(num, metadata) + + return lcmLayerTypes[fingerprint] +} + +// SupportedLCMFingerprints returns a slice of all LCM fingerprints that has +// been registered so far. +func SupportedLCMFingerprints() []LCMFingerprint { + fingerprints := make([]LCMFingerprint, 0, len(lcmLayerTypes)) + for fp := range lcmLayerTypes { + fingerprints = append(fingerprints, fp) + } + return fingerprints +} + +// GetLCMLayerType returns the underlying LCM message's LayerType. +// This LayerType has to be registered by using RegisterLCMLayerType. +func GetLCMLayerType(fingerprint LCMFingerprint) gopacket.LayerType { + layerType, ok := lcmLayerTypes[fingerprint] + if !ok { + return gopacket.LayerTypePayload + } + + return layerType +} + +func decodeLCM(data []byte, p gopacket.PacketBuilder) error { + lcm := &LCM{} + + err := lcm.DecodeFromBytes(data, p) + if err != nil { + return err + } + + p.AddLayer(lcm) + p.SetApplicationLayer(lcm) + + return p.NextDecoder(lcm.NextLayerType()) +} + +// DecodeFromBytes decodes the given bytes into this layer. +func (lcm *LCM) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 8 { + df.SetTruncated() + return errors.New("LCM < 8 bytes") + } + offset := 0 + + lcm.Magic = binary.BigEndian.Uint32(data[offset:4]) + offset += 4 + + if lcm.Magic != LCMShortHeaderMagic && lcm.Magic != LCMFragmentedHeaderMagic { + return fmt.Errorf("Received LCM header magic %v does not match know "+ + "LCM magic numbers. 
Dropping packet.", lcm.Magic) + } + + lcm.SequenceNumber = binary.BigEndian.Uint32(data[offset:8]) + offset += 4 + + if lcm.Magic == LCMFragmentedHeaderMagic { + lcm.Fragmented = true + + lcm.PayloadSize = binary.BigEndian.Uint32(data[offset : offset+4]) + offset += 4 + + lcm.FragmentOffset = binary.BigEndian.Uint32(data[offset : offset+4]) + offset += 4 + + lcm.FragmentNumber = binary.BigEndian.Uint16(data[offset : offset+2]) + offset += 2 + + lcm.TotalFragments = binary.BigEndian.Uint16(data[offset : offset+2]) + offset += 2 + } else { + lcm.Fragmented = false + } + + if !lcm.Fragmented || (lcm.Fragmented && lcm.FragmentNumber == 0) { + buffer := make([]byte, 0) + for _, b := range data[offset:] { + offset++ + + if b == 0 { + break + } + + buffer = append(buffer, b) + } + + lcm.ChannelName = string(buffer) + } + + lcm.fingerprint = LCMFingerprint( + binary.BigEndian.Uint64(data[offset : offset+8])) + + lcm.contents = data[:offset] + lcm.payload = data[offset:] + + return nil +} + +// CanDecode returns a set of layers that LCM objects can decode. +// As LCM objects can only decode the LCM layer, we just return that layer. +func (lcm LCM) CanDecode() gopacket.LayerClass { + return LayerTypeLCM +} + +// NextLayerType specifies the LCM payload layer type following this header. +// As LCM packets are serialized structs with uniq fingerprints for each uniq +// combination of data types, lookup of correct layer type is based on that +// fingerprint. +func (lcm LCM) NextLayerType() gopacket.LayerType { + if !lcm.Fragmented || (lcm.Fragmented && lcm.FragmentNumber == 0) { + return GetLCMLayerType(lcm.fingerprint) + } + + return gopacket.LayerTypeFragment +} + +// LayerType returns LayerTypeLCM +func (lcm LCM) LayerType() gopacket.LayerType { + return LayerTypeLCM +} + +// LayerContents returns the contents of the LCM header. +func (lcm LCM) LayerContents() []byte { + return lcm.contents +} + +// LayerPayload returns the payload following this LCM header. +func (lcm LCM) LayerPayload() []byte { + return lcm.payload +} + +// Payload returns the payload following this LCM header. +func (lcm LCM) Payload() []byte { + return lcm.LayerPayload() +} + +// Fingerprint returns the LCM fingerprint of the underlying message. +func (lcm LCM) Fingerprint() LCMFingerprint { + return lcm.fingerprint +} diff --git a/vendor/github.com/google/gopacket/layers/linux_sll.go b/vendor/github.com/google/gopacket/layers/linux_sll.go new file mode 100644 index 0000000000..85a4f8bdd0 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/linux_sll.go @@ -0,0 +1,98 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. 
+ +package layers + +import ( + "encoding/binary" + "errors" + "fmt" + "net" + + "github.com/google/gopacket" +) + +type LinuxSLLPacketType uint16 + +const ( + LinuxSLLPacketTypeHost LinuxSLLPacketType = 0 // To us + LinuxSLLPacketTypeBroadcast LinuxSLLPacketType = 1 // To all + LinuxSLLPacketTypeMulticast LinuxSLLPacketType = 2 // To group + LinuxSLLPacketTypeOtherhost LinuxSLLPacketType = 3 // To someone else + LinuxSLLPacketTypeOutgoing LinuxSLLPacketType = 4 // Outgoing of any type + // These ones are invisible by user level + LinuxSLLPacketTypeLoopback LinuxSLLPacketType = 5 // MC/BRD frame looped back + LinuxSLLPacketTypeFastroute LinuxSLLPacketType = 6 // Fastrouted frame +) + +func (l LinuxSLLPacketType) String() string { + switch l { + case LinuxSLLPacketTypeHost: + return "host" + case LinuxSLLPacketTypeBroadcast: + return "broadcast" + case LinuxSLLPacketTypeMulticast: + return "multicast" + case LinuxSLLPacketTypeOtherhost: + return "otherhost" + case LinuxSLLPacketTypeOutgoing: + return "outgoing" + case LinuxSLLPacketTypeLoopback: + return "loopback" + case LinuxSLLPacketTypeFastroute: + return "fastroute" + } + return fmt.Sprintf("Unknown(%d)", int(l)) +} + +type LinuxSLL struct { + BaseLayer + PacketType LinuxSLLPacketType + AddrLen uint16 + Addr net.HardwareAddr + EthernetType EthernetType + AddrType uint16 +} + +// LayerType returns LayerTypeLinuxSLL. +func (sll *LinuxSLL) LayerType() gopacket.LayerType { return LayerTypeLinuxSLL } + +func (sll *LinuxSLL) CanDecode() gopacket.LayerClass { + return LayerTypeLinuxSLL +} + +func (sll *LinuxSLL) LinkFlow() gopacket.Flow { + return gopacket.NewFlow(EndpointMAC, sll.Addr, nil) +} + +func (sll *LinuxSLL) NextLayerType() gopacket.LayerType { + return sll.EthernetType.LayerType() +} + +func (sll *LinuxSLL) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 16 { + return errors.New("Linux SLL packet too small") + } + sll.PacketType = LinuxSLLPacketType(binary.BigEndian.Uint16(data[0:2])) + sll.AddrType = binary.BigEndian.Uint16(data[2:4]) + sll.AddrLen = binary.BigEndian.Uint16(data[4:6]) + + sll.Addr = net.HardwareAddr(data[6 : sll.AddrLen+6]) + sll.EthernetType = EthernetType(binary.BigEndian.Uint16(data[14:16])) + sll.BaseLayer = BaseLayer{data[:16], data[16:]} + + return nil +} + +func decodeLinuxSLL(data []byte, p gopacket.PacketBuilder) error { + sll := &LinuxSLL{} + if err := sll.DecodeFromBytes(data, p); err != nil { + return err + } + p.AddLayer(sll) + p.SetLinkLayer(sll) + return p.NextDecoder(sll.EthernetType) +} diff --git a/vendor/github.com/google/gopacket/layers/llc.go b/vendor/github.com/google/gopacket/layers/llc.go new file mode 100644 index 0000000000..cad6803671 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/llc.go @@ -0,0 +1,193 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "errors" + + "github.com/google/gopacket" +) + +// LLC is the layer used for 802.2 Logical Link Control headers. +// See http://standards.ieee.org/getieee802/download/802.2-1998.pdf +type LLC struct { + BaseLayer + DSAP uint8 + IG bool // true means group, false means individual + SSAP uint8 + CR bool // true means response, false means command + Control uint16 +} + +// LayerType returns gopacket.LayerTypeLLC. 
+func (l *LLC) LayerType() gopacket.LayerType { return LayerTypeLLC } + +// DecodeFromBytes decodes the given bytes into this layer. +func (l *LLC) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 3 { + return errors.New("LLC header too small") + } + l.DSAP = data[0] & 0xFE + l.IG = data[0]&0x1 != 0 + l.SSAP = data[1] & 0xFE + l.CR = data[1]&0x1 != 0 + l.Control = uint16(data[2]) + + if l.Control&0x1 == 0 || l.Control&0x3 == 0x1 { + if len(data) < 4 { + return errors.New("LLC header too small") + } + l.Control = l.Control<<8 | uint16(data[3]) + l.Contents = data[:4] + l.Payload = data[4:] + } else { + l.Contents = data[:3] + l.Payload = data[3:] + } + return nil +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (l *LLC) CanDecode() gopacket.LayerClass { + return LayerTypeLLC +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (l *LLC) NextLayerType() gopacket.LayerType { + switch { + case l.DSAP == 0xAA && l.SSAP == 0xAA: + return LayerTypeSNAP + case l.DSAP == 0x42 && l.SSAP == 0x42: + return LayerTypeSTP + } + return gopacket.LayerTypeZero // Not implemented +} + +// SNAP is used inside LLC. See +// http://standards.ieee.org/getieee802/download/802-2001.pdf. +// From http://en.wikipedia.org/wiki/Subnetwork_Access_Protocol: +// "[T]he Subnetwork Access Protocol (SNAP) is a mechanism for multiplexing, +// on networks using IEEE 802.2 LLC, more protocols than can be distinguished +// by the 8-bit 802.2 Service Access Point (SAP) fields." +type SNAP struct { + BaseLayer + OrganizationalCode []byte + Type EthernetType +} + +// LayerType returns gopacket.LayerTypeSNAP. +func (s *SNAP) LayerType() gopacket.LayerType { return LayerTypeSNAP } + +// DecodeFromBytes decodes the given bytes into this layer. +func (s *SNAP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 5 { + return errors.New("SNAP header too small") + } + s.OrganizationalCode = data[:3] + s.Type = EthernetType(binary.BigEndian.Uint16(data[3:5])) + s.BaseLayer = BaseLayer{data[:5], data[5:]} + return nil +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (s *SNAP) CanDecode() gopacket.LayerClass { + return LayerTypeSNAP +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (s *SNAP) NextLayerType() gopacket.LayerType { + // See BUG(gconnel) in decodeSNAP + return s.Type.LayerType() +} + +func decodeLLC(data []byte, p gopacket.PacketBuilder) error { + l := &LLC{} + err := l.DecodeFromBytes(data, p) + if err != nil { + return err + } + p.AddLayer(l) + return p.NextDecoder(l.NextLayerType()) +} + +func decodeSNAP(data []byte, p gopacket.PacketBuilder) error { + s := &SNAP{} + err := s.DecodeFromBytes(data, p) + if err != nil { + return err + } + p.AddLayer(s) + // BUG(gconnell): When decoding SNAP, we treat the SNAP type as an Ethernet + // type. This may not actually be an ethernet type in all cases, + // depending on the organizational code. Right now, we don't check. + return p.NextDecoder(s.Type) +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. 
+func (l *LLC) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + var igFlag, crFlag byte + var length int + + if l.Control&0xFF00 != 0 { + length = 4 + } else { + length = 3 + } + + if l.DSAP&0x1 != 0 { + return errors.New("DSAP value invalid, should not include IG flag bit") + } + + if l.SSAP&0x1 != 0 { + return errors.New("SSAP value invalid, should not include CR flag bit") + } + + if buf, err := b.PrependBytes(length); err != nil { + return err + } else { + igFlag = 0 + if l.IG { + igFlag = 0x1 + } + + crFlag = 0 + if l.CR { + crFlag = 0x1 + } + + buf[0] = l.DSAP + igFlag + buf[1] = l.SSAP + crFlag + + if length == 4 { + buf[2] = uint8(l.Control >> 8) + buf[3] = uint8(l.Control) + } else { + buf[2] = uint8(l.Control) + } + } + + return nil +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (s *SNAP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + if buf, err := b.PrependBytes(5); err != nil { + return err + } else { + buf[0] = s.OrganizationalCode[0] + buf[1] = s.OrganizationalCode[1] + buf[2] = s.OrganizationalCode[2] + binary.BigEndian.PutUint16(buf[3:5], uint16(s.Type)) + } + + return nil +} diff --git a/vendor/github.com/google/gopacket/layers/lldp.go b/vendor/github.com/google/gopacket/layers/lldp.go new file mode 100644 index 0000000000..16a5bbadad --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/lldp.go @@ -0,0 +1,1603 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "errors" + "fmt" + + "github.com/google/gopacket" +) + +// LLDPTLVType is the type of each TLV value in a LinkLayerDiscovery packet. +type LLDPTLVType byte + +const ( + LLDPTLVEnd LLDPTLVType = 0 + LLDPTLVChassisID LLDPTLVType = 1 + LLDPTLVPortID LLDPTLVType = 2 + LLDPTLVTTL LLDPTLVType = 3 + LLDPTLVPortDescription LLDPTLVType = 4 + LLDPTLVSysName LLDPTLVType = 5 + LLDPTLVSysDescription LLDPTLVType = 6 + LLDPTLVSysCapabilities LLDPTLVType = 7 + LLDPTLVMgmtAddress LLDPTLVType = 8 + LLDPTLVOrgSpecific LLDPTLVType = 127 +) + +// LinkLayerDiscoveryValue is a TLV value inside a LinkLayerDiscovery packet layer. 
+type LinkLayerDiscoveryValue struct { + Type LLDPTLVType + Length uint16 + Value []byte +} + +func (c *LinkLayerDiscoveryValue) len() int { + return 0 +} + +// LLDPChassisIDSubType specifies the value type for a single LLDPChassisID.ID +type LLDPChassisIDSubType byte + +// LLDP Chassis Types +const ( + LLDPChassisIDSubTypeReserved LLDPChassisIDSubType = 0 + LLDPChassisIDSubTypeChassisComp LLDPChassisIDSubType = 1 + LLDPChassisIDSubtypeIfaceAlias LLDPChassisIDSubType = 2 + LLDPChassisIDSubTypePortComp LLDPChassisIDSubType = 3 + LLDPChassisIDSubTypeMACAddr LLDPChassisIDSubType = 4 + LLDPChassisIDSubTypeNetworkAddr LLDPChassisIDSubType = 5 + LLDPChassisIDSubtypeIfaceName LLDPChassisIDSubType = 6 + LLDPChassisIDSubTypeLocal LLDPChassisIDSubType = 7 +) + +type LLDPChassisID struct { + Subtype LLDPChassisIDSubType + ID []byte +} + +func (c *LLDPChassisID) serialize() []byte { + + var buf = make([]byte, c.serializedLen()) + idLen := uint16(LLDPTLVChassisID)<<9 | uint16(len(c.ID)+1) //id should take 7 bits, length should take 9 bits, +1 for subtype + binary.BigEndian.PutUint16(buf[0:2], idLen) + buf[2] = byte(c.Subtype) + copy(buf[3:], c.ID) + return buf +} + +func (c *LLDPChassisID) serializedLen() int { + return len(c.ID) + 3 // +2 for id and length, +1 for subtype +} + +// LLDPPortIDSubType specifies the value type for a single LLDPPortID.ID +type LLDPPortIDSubType byte + +// LLDP PortID types +const ( + LLDPPortIDSubtypeReserved LLDPPortIDSubType = 0 + LLDPPortIDSubtypeIfaceAlias LLDPPortIDSubType = 1 + LLDPPortIDSubtypePortComp LLDPPortIDSubType = 2 + LLDPPortIDSubtypeMACAddr LLDPPortIDSubType = 3 + LLDPPortIDSubtypeNetworkAddr LLDPPortIDSubType = 4 + LLDPPortIDSubtypeIfaceName LLDPPortIDSubType = 5 + LLDPPortIDSubtypeAgentCircuitID LLDPPortIDSubType = 6 + LLDPPortIDSubtypeLocal LLDPPortIDSubType = 7 +) + +type LLDPPortID struct { + Subtype LLDPPortIDSubType + ID []byte +} + +func (c *LLDPPortID) serialize() []byte { + + var buf = make([]byte, c.serializedLen()) + idLen := uint16(LLDPTLVPortID)<<9 | uint16(len(c.ID)+1) //id should take 7 bits, length should take 9 bits, +1 for subtype + binary.BigEndian.PutUint16(buf[0:2], idLen) + buf[2] = byte(c.Subtype) + copy(buf[3:], c.ID) + return buf +} + +func (c *LLDPPortID) serializedLen() int { + return len(c.ID) + 3 // +2 for id and length, +1 for subtype +} + +// LinkLayerDiscovery is a packet layer containing the LinkLayer Discovery Protocol. +// See http:http://standards.ieee.org/getieee802/download/802.1AB-2009.pdf +// ChassisID, PortID and TTL are mandatory TLV's. 
Other values can be decoded +// with DecodeValues() +type LinkLayerDiscovery struct { + BaseLayer + ChassisID LLDPChassisID + PortID LLDPPortID + TTL uint16 + Values []LinkLayerDiscoveryValue +} + +type IEEEOUI uint32 + +// http://standards.ieee.org/develop/regauth/oui/oui.txt +const ( + IEEEOUI8021 IEEEOUI = 0x0080c2 + IEEEOUI8023 IEEEOUI = 0x00120f + IEEEOUI80211 IEEEOUI = 0x000fac + IEEEOUI8021Qbg IEEEOUI = 0x0013BF + IEEEOUICisco2 IEEEOUI = 0x000142 + IEEEOUIMedia IEEEOUI = 0x0012bb // TR-41 + IEEEOUIProfinet IEEEOUI = 0x000ecf + IEEEOUIDCBX IEEEOUI = 0x001b21 +) + +// LLDPOrgSpecificTLV is an Organisation-specific TLV +type LLDPOrgSpecificTLV struct { + OUI IEEEOUI + SubType uint8 + Info []byte +} + +// LLDPCapabilities Types +const ( + LLDPCapsOther uint16 = 1 << 0 + LLDPCapsRepeater uint16 = 1 << 1 + LLDPCapsBridge uint16 = 1 << 2 + LLDPCapsWLANAP uint16 = 1 << 3 + LLDPCapsRouter uint16 = 1 << 4 + LLDPCapsPhone uint16 = 1 << 5 + LLDPCapsDocSis uint16 = 1 << 6 + LLDPCapsStationOnly uint16 = 1 << 7 + LLDPCapsCVLAN uint16 = 1 << 8 + LLDPCapsSVLAN uint16 = 1 << 9 + LLDPCapsTmpr uint16 = 1 << 10 +) + +// LLDPCapabilities represents the capabilities of a device +type LLDPCapabilities struct { + Other bool + Repeater bool + Bridge bool + WLANAP bool + Router bool + Phone bool + DocSis bool + StationOnly bool + CVLAN bool + SVLAN bool + TMPR bool +} + +type LLDPSysCapabilities struct { + SystemCap LLDPCapabilities + EnabledCap LLDPCapabilities +} + +type IANAAddressFamily byte + +// LLDP Management Address Subtypes +// http://www.iana.org/assignments/address-family-numbers/address-family-numbers.xml +const ( + IANAAddressFamilyReserved IANAAddressFamily = 0 + IANAAddressFamilyIPV4 IANAAddressFamily = 1 + IANAAddressFamilyIPV6 IANAAddressFamily = 2 + IANAAddressFamilyNSAP IANAAddressFamily = 3 + IANAAddressFamilyHDLC IANAAddressFamily = 4 + IANAAddressFamilyBBN1822 IANAAddressFamily = 5 + IANAAddressFamily802 IANAAddressFamily = 6 + IANAAddressFamilyE163 IANAAddressFamily = 7 + IANAAddressFamilyE164 IANAAddressFamily = 8 + IANAAddressFamilyF69 IANAAddressFamily = 9 + IANAAddressFamilyX121 IANAAddressFamily = 10 + IANAAddressFamilyIPX IANAAddressFamily = 11 + IANAAddressFamilyAtalk IANAAddressFamily = 12 + IANAAddressFamilyDecnet IANAAddressFamily = 13 + IANAAddressFamilyBanyan IANAAddressFamily = 14 + IANAAddressFamilyE164NSAP IANAAddressFamily = 15 + IANAAddressFamilyDNS IANAAddressFamily = 16 + IANAAddressFamilyDistname IANAAddressFamily = 17 + IANAAddressFamilyASNumber IANAAddressFamily = 18 + IANAAddressFamilyXTPIPV4 IANAAddressFamily = 19 + IANAAddressFamilyXTPIPV6 IANAAddressFamily = 20 + IANAAddressFamilyXTP IANAAddressFamily = 21 + IANAAddressFamilyFcWWPN IANAAddressFamily = 22 + IANAAddressFamilyFcWWNN IANAAddressFamily = 23 + IANAAddressFamilyGWID IANAAddressFamily = 24 + IANAAddressFamilyL2VPN IANAAddressFamily = 25 +) + +type LLDPInterfaceSubtype byte + +// LLDP Interface Subtypes +const ( + LLDPInterfaceSubtypeUnknown LLDPInterfaceSubtype = 1 + LLDPInterfaceSubtypeifIndex LLDPInterfaceSubtype = 2 + LLDPInterfaceSubtypeSysPort LLDPInterfaceSubtype = 3 +) + +type LLDPMgmtAddress struct { + Subtype IANAAddressFamily + Address []byte + InterfaceSubtype LLDPInterfaceSubtype + InterfaceNumber uint32 + OID string +} + +// LinkLayerDiscoveryInfo represents the decoded details for a set of LinkLayerDiscoveryValues +// Organisation-specific TLV's can be decoded using the various Decode() methods +type LinkLayerDiscoveryInfo struct { + BaseLayer + PortDescription string + SysName string + 
SysDescription string + SysCapabilities LLDPSysCapabilities + MgmtAddress LLDPMgmtAddress + OrgTLVs []LLDPOrgSpecificTLV // Private TLVs + Unknown []LinkLayerDiscoveryValue // undecoded TLVs +} + +/// IEEE 802.1 TLV Subtypes +const ( + LLDP8021SubtypePortVLANID uint8 = 1 + LLDP8021SubtypeProtocolVLANID uint8 = 2 + LLDP8021SubtypeVLANName uint8 = 3 + LLDP8021SubtypeProtocolIdentity uint8 = 4 + LLDP8021SubtypeVDIUsageDigest uint8 = 5 + LLDP8021SubtypeManagementVID uint8 = 6 + LLDP8021SubtypeLinkAggregation uint8 = 7 +) + +// VLAN Port Protocol ID options +const ( + LLDPProtocolVLANIDCapability byte = 1 << 1 + LLDPProtocolVLANIDStatus byte = 1 << 2 +) + +type PortProtocolVLANID struct { + Supported bool + Enabled bool + ID uint16 +} + +type VLANName struct { + ID uint16 + Name string +} + +type ProtocolIdentity []byte + +// LACP options +const ( + LLDPAggregationCapability byte = 1 << 0 + LLDPAggregationStatus byte = 1 << 1 +) + +// IEEE 802 Link Aggregation parameters +type LLDPLinkAggregation struct { + Supported bool + Enabled bool + PortID uint32 +} + +// LLDPInfo8021 represents the information carried in 802.1 Org-specific TLVs +type LLDPInfo8021 struct { + PVID uint16 + PPVIDs []PortProtocolVLANID + VLANNames []VLANName + ProtocolIdentities []ProtocolIdentity + VIDUsageDigest uint32 + ManagementVID uint16 + LinkAggregation LLDPLinkAggregation +} + +// IEEE 802.3 TLV Subtypes +const ( + LLDP8023SubtypeMACPHY uint8 = 1 + LLDP8023SubtypeMDIPower uint8 = 2 + LLDP8023SubtypeLinkAggregation uint8 = 3 + LLDP8023SubtypeMTU uint8 = 4 +) + +// MACPHY options +const ( + LLDPMACPHYCapability byte = 1 << 0 + LLDPMACPHYStatus byte = 1 << 1 +) + +// From IANA-MAU-MIB (introduced by RFC 4836) - dot3MauType +const ( + LLDPMAUTypeUnknown uint16 = 0 + LLDPMAUTypeAUI uint16 = 1 + LLDPMAUType10Base5 uint16 = 2 + LLDPMAUTypeFOIRL uint16 = 3 + LLDPMAUType10Base2 uint16 = 4 + LLDPMAUType10BaseT uint16 = 5 + LLDPMAUType10BaseFP uint16 = 6 + LLDPMAUType10BaseFB uint16 = 7 + LLDPMAUType10BaseFL uint16 = 8 + LLDPMAUType10BROAD36 uint16 = 9 + LLDPMAUType10BaseT_HD uint16 = 10 + LLDPMAUType10BaseT_FD uint16 = 11 + LLDPMAUType10BaseFL_HD uint16 = 12 + LLDPMAUType10BaseFL_FD uint16 = 13 + LLDPMAUType100BaseT4 uint16 = 14 + LLDPMAUType100BaseTX_HD uint16 = 15 + LLDPMAUType100BaseTX_FD uint16 = 16 + LLDPMAUType100BaseFX_HD uint16 = 17 + LLDPMAUType100BaseFX_FD uint16 = 18 + LLDPMAUType100BaseT2_HD uint16 = 19 + LLDPMAUType100BaseT2_FD uint16 = 20 + LLDPMAUType1000BaseX_HD uint16 = 21 + LLDPMAUType1000BaseX_FD uint16 = 22 + LLDPMAUType1000BaseLX_HD uint16 = 23 + LLDPMAUType1000BaseLX_FD uint16 = 24 + LLDPMAUType1000BaseSX_HD uint16 = 25 + LLDPMAUType1000BaseSX_FD uint16 = 26 + LLDPMAUType1000BaseCX_HD uint16 = 27 + LLDPMAUType1000BaseCX_FD uint16 = 28 + LLDPMAUType1000BaseT_HD uint16 = 29 + LLDPMAUType1000BaseT_FD uint16 = 30 + LLDPMAUType10GBaseX uint16 = 31 + LLDPMAUType10GBaseLX4 uint16 = 32 + LLDPMAUType10GBaseR uint16 = 33 + LLDPMAUType10GBaseER uint16 = 34 + LLDPMAUType10GBaseLR uint16 = 35 + LLDPMAUType10GBaseSR uint16 = 36 + LLDPMAUType10GBaseW uint16 = 37 + LLDPMAUType10GBaseEW uint16 = 38 + LLDPMAUType10GBaseLW uint16 = 39 + LLDPMAUType10GBaseSW uint16 = 40 + LLDPMAUType10GBaseCX4 uint16 = 41 + LLDPMAUType2BaseTL uint16 = 42 + LLDPMAUType10PASS_TS uint16 = 43 + LLDPMAUType100BaseBX10D uint16 = 44 + LLDPMAUType100BaseBX10U uint16 = 45 + LLDPMAUType100BaseLX10 uint16 = 46 + LLDPMAUType1000BaseBX10D uint16 = 47 + LLDPMAUType1000BaseBX10U uint16 = 48 + LLDPMAUType1000BaseLX10 uint16 = 49 + LLDPMAUType1000BasePX10D 
uint16 = 50 + LLDPMAUType1000BasePX10U uint16 = 51 + LLDPMAUType1000BasePX20D uint16 = 52 + LLDPMAUType1000BasePX20U uint16 = 53 + LLDPMAUType10GBaseT uint16 = 54 + LLDPMAUType10GBaseLRM uint16 = 55 + LLDPMAUType1000BaseKX uint16 = 56 + LLDPMAUType10GBaseKX4 uint16 = 57 + LLDPMAUType10GBaseKR uint16 = 58 + LLDPMAUType10_1GBasePRX_D1 uint16 = 59 + LLDPMAUType10_1GBasePRX_D2 uint16 = 60 + LLDPMAUType10_1GBasePRX_D3 uint16 = 61 + LLDPMAUType10_1GBasePRX_U1 uint16 = 62 + LLDPMAUType10_1GBasePRX_U2 uint16 = 63 + LLDPMAUType10_1GBasePRX_U3 uint16 = 64 + LLDPMAUType10GBasePR_D1 uint16 = 65 + LLDPMAUType10GBasePR_D2 uint16 = 66 + LLDPMAUType10GBasePR_D3 uint16 = 67 + LLDPMAUType10GBasePR_U1 uint16 = 68 + LLDPMAUType10GBasePR_U3 uint16 = 69 +) + +// From RFC 3636 - ifMauAutoNegCapAdvertisedBits +const ( + LLDPMAUPMDOther uint16 = 1 << 15 + LLDPMAUPMD10BaseT uint16 = 1 << 14 + LLDPMAUPMD10BaseT_FD uint16 = 1 << 13 + LLDPMAUPMD100BaseT4 uint16 = 1 << 12 + LLDPMAUPMD100BaseTX uint16 = 1 << 11 + LLDPMAUPMD100BaseTX_FD uint16 = 1 << 10 + LLDPMAUPMD100BaseT2 uint16 = 1 << 9 + LLDPMAUPMD100BaseT2_FD uint16 = 1 << 8 + LLDPMAUPMDFDXPAUSE uint16 = 1 << 7 + LLDPMAUPMDFDXAPAUSE uint16 = 1 << 6 + LLDPMAUPMDFDXSPAUSE uint16 = 1 << 5 + LLDPMAUPMDFDXBPAUSE uint16 = 1 << 4 + LLDPMAUPMD1000BaseX uint16 = 1 << 3 + LLDPMAUPMD1000BaseX_FD uint16 = 1 << 2 + LLDPMAUPMD1000BaseT uint16 = 1 << 1 + LLDPMAUPMD1000BaseT_FD uint16 = 1 << 0 +) + +// Inverted ifMauAutoNegCapAdvertisedBits if required +// (Some manufacturers misinterpreted the spec - +// see https://bugs.wireshark.org/bugzilla/show_bug.cgi?id=1455) +const ( + LLDPMAUPMDOtherInv uint16 = 1 << 0 + LLDPMAUPMD10BaseTInv uint16 = 1 << 1 + LLDPMAUPMD10BaseT_FDInv uint16 = 1 << 2 + LLDPMAUPMD100BaseT4Inv uint16 = 1 << 3 + LLDPMAUPMD100BaseTXInv uint16 = 1 << 4 + LLDPMAUPMD100BaseTX_FDInv uint16 = 1 << 5 + LLDPMAUPMD100BaseT2Inv uint16 = 1 << 6 + LLDPMAUPMD100BaseT2_FDInv uint16 = 1 << 7 + LLDPMAUPMDFDXPAUSEInv uint16 = 1 << 8 + LLDPMAUPMDFDXAPAUSEInv uint16 = 1 << 9 + LLDPMAUPMDFDXSPAUSEInv uint16 = 1 << 10 + LLDPMAUPMDFDXBPAUSEInv uint16 = 1 << 11 + LLDPMAUPMD1000BaseXInv uint16 = 1 << 12 + LLDPMAUPMD1000BaseX_FDInv uint16 = 1 << 13 + LLDPMAUPMD1000BaseTInv uint16 = 1 << 14 + LLDPMAUPMD1000BaseT_FDInv uint16 = 1 << 15 +) + +type LLDPMACPHYConfigStatus struct { + AutoNegSupported bool + AutoNegEnabled bool + AutoNegCapability uint16 + MAUType uint16 +} + +// MDI Power options +const ( + LLDPMDIPowerPortClass byte = 1 << 0 + LLDPMDIPowerCapability byte = 1 << 1 + LLDPMDIPowerStatus byte = 1 << 2 + LLDPMDIPowerPairsAbility byte = 1 << 3 +) + +type LLDPPowerType byte + +type LLDPPowerSource byte + +type LLDPPowerPriority byte + +const ( + LLDPPowerPriorityUnknown LLDPPowerPriority = 0 + LLDPPowerPriorityMedium LLDPPowerPriority = 1 + LLDPPowerPriorityHigh LLDPPowerPriority = 2 + LLDPPowerPriorityLow LLDPPowerPriority = 3 +) + +type LLDPPowerViaMDI8023 struct { + PortClassPSE bool // false = PD + PSESupported bool + PSEEnabled bool + PSEPairsAbility bool + PSEPowerPair uint8 + PSEClass uint8 + Type LLDPPowerType + Source LLDPPowerSource + Priority LLDPPowerPriority + Requested uint16 // 1-510 Watts + Allocated uint16 // 1-510 Watts +} + +// LLDPInfo8023 represents the information carried in 802.3 Org-specific TLVs +type LLDPInfo8023 struct { + MACPHYConfigStatus LLDPMACPHYConfigStatus + PowerViaMDI LLDPPowerViaMDI8023 + LinkAggregation LLDPLinkAggregation + MTU uint16 +} + +// IEEE 802.1Qbg TLV Subtypes +const ( + LLDP8021QbgEVB uint8 = 0 + LLDP8021QbgCDCP uint8 = 1 + 
LLDP8021QbgVDP uint8 = 2 + LLDP8021QbgEVB22 uint8 = 13 +) + +// LLDPEVBCapabilities Types +const ( + LLDPEVBCapsSTD uint16 = 1 << 7 + LLDPEVBCapsRR uint16 = 1 << 6 + LLDPEVBCapsRTE uint16 = 1 << 2 + LLDPEVBCapsECP uint16 = 1 << 1 + LLDPEVBCapsVDP uint16 = 1 << 0 +) + +// LLDPEVBCapabilities represents the EVB capabilities of a device +type LLDPEVBCapabilities struct { + StandardBridging bool + ReflectiveRelay bool + RetransmissionTimerExponent bool + EdgeControlProtocol bool + VSIDiscoveryProtocol bool +} + +type LLDPEVBSettings struct { + Supported LLDPEVBCapabilities + Enabled LLDPEVBCapabilities + SupportedVSIs uint16 + ConfiguredVSIs uint16 + RTEExponent uint8 +} + +// LLDPInfo8021Qbg represents the information carried in 802.1Qbg Org-specific TLVs +type LLDPInfo8021Qbg struct { + EVBSettings LLDPEVBSettings +} + +type LLDPMediaSubtype uint8 + +// Media TLV Subtypes +const ( + LLDPMediaTypeCapabilities LLDPMediaSubtype = 1 + LLDPMediaTypeNetwork LLDPMediaSubtype = 2 + LLDPMediaTypeLocation LLDPMediaSubtype = 3 + LLDPMediaTypePower LLDPMediaSubtype = 4 + LLDPMediaTypeHardware LLDPMediaSubtype = 5 + LLDPMediaTypeFirmware LLDPMediaSubtype = 6 + LLDPMediaTypeSoftware LLDPMediaSubtype = 7 + LLDPMediaTypeSerial LLDPMediaSubtype = 8 + LLDPMediaTypeManufacturer LLDPMediaSubtype = 9 + LLDPMediaTypeModel LLDPMediaSubtype = 10 + LLDPMediaTypeAssetID LLDPMediaSubtype = 11 +) + +type LLDPMediaClass uint8 + +// Media Class Values +const ( + LLDPMediaClassUndefined LLDPMediaClass = 0 + LLDPMediaClassEndpointI LLDPMediaClass = 1 + LLDPMediaClassEndpointII LLDPMediaClass = 2 + LLDPMediaClassEndpointIII LLDPMediaClass = 3 + LLDPMediaClassNetwork LLDPMediaClass = 4 +) + +// LLDPMediaCapabilities Types +const ( + LLDPMediaCapsLLDP uint16 = 1 << 0 + LLDPMediaCapsNetwork uint16 = 1 << 1 + LLDPMediaCapsLocation uint16 = 1 << 2 + LLDPMediaCapsPowerPSE uint16 = 1 << 3 + LLDPMediaCapsPowerPD uint16 = 1 << 4 + LLDPMediaCapsInventory uint16 = 1 << 5 +) + +// LLDPMediaCapabilities represents the LLDP Media capabilities of a device +type LLDPMediaCapabilities struct { + Capabilities bool + NetworkPolicy bool + Location bool + PowerPSE bool + PowerPD bool + Inventory bool + Class LLDPMediaClass +} + +type LLDPApplicationType uint8 + +const ( + LLDPAppTypeReserved LLDPApplicationType = 0 + LLDPAppTypeVoice LLDPApplicationType = 1 + LLDPappTypeVoiceSignaling LLDPApplicationType = 2 + LLDPappTypeGuestVoice LLDPApplicationType = 3 + LLDPappTypeGuestVoiceSignaling LLDPApplicationType = 4 + LLDPappTypeSoftphoneVoice LLDPApplicationType = 5 + LLDPappTypeVideoConferencing LLDPApplicationType = 6 + LLDPappTypeStreamingVideo LLDPApplicationType = 7 + LLDPappTypeVideoSignaling LLDPApplicationType = 8 +) + +type LLDPNetworkPolicy struct { + ApplicationType LLDPApplicationType + Defined bool + Tagged bool + VLANId uint16 + L2Priority uint16 + DSCPValue uint8 +} + +type LLDPLocationFormat uint8 + +const ( + LLDPLocationFormatInvalid LLDPLocationFormat = 0 + LLDPLocationFormatCoordinate LLDPLocationFormat = 1 + LLDPLocationFormatAddress LLDPLocationFormat = 2 + LLDPLocationFormatECS LLDPLocationFormat = 3 +) + +type LLDPLocationAddressWhat uint8 + +const ( + LLDPLocationAddressWhatDHCP LLDPLocationAddressWhat = 0 + LLDPLocationAddressWhatNetwork LLDPLocationAddressWhat = 1 + LLDPLocationAddressWhatClient LLDPLocationAddressWhat = 2 +) + +type LLDPLocationAddressType uint8 + +const ( + LLDPLocationAddressTypeLanguage LLDPLocationAddressType = 0 + LLDPLocationAddressTypeNational LLDPLocationAddressType = 1 + 
LLDPLocationAddressTypeCounty LLDPLocationAddressType = 2 + LLDPLocationAddressTypeCity LLDPLocationAddressType = 3 + LLDPLocationAddressTypeCityDivision LLDPLocationAddressType = 4 + LLDPLocationAddressTypeNeighborhood LLDPLocationAddressType = 5 + LLDPLocationAddressTypeStreet LLDPLocationAddressType = 6 + LLDPLocationAddressTypeLeadingStreet LLDPLocationAddressType = 16 + LLDPLocationAddressTypeTrailingStreet LLDPLocationAddressType = 17 + LLDPLocationAddressTypeStreetSuffix LLDPLocationAddressType = 18 + LLDPLocationAddressTypeHouseNum LLDPLocationAddressType = 19 + LLDPLocationAddressTypeHouseSuffix LLDPLocationAddressType = 20 + LLDPLocationAddressTypeLandmark LLDPLocationAddressType = 21 + LLDPLocationAddressTypeAdditional LLDPLocationAddressType = 22 + LLDPLocationAddressTypeName LLDPLocationAddressType = 23 + LLDPLocationAddressTypePostal LLDPLocationAddressType = 24 + LLDPLocationAddressTypeBuilding LLDPLocationAddressType = 25 + LLDPLocationAddressTypeUnit LLDPLocationAddressType = 26 + LLDPLocationAddressTypeFloor LLDPLocationAddressType = 27 + LLDPLocationAddressTypeRoom LLDPLocationAddressType = 28 + LLDPLocationAddressTypePlace LLDPLocationAddressType = 29 + LLDPLocationAddressTypeScript LLDPLocationAddressType = 128 +) + +type LLDPLocationCoordinate struct { + LatitudeResolution uint8 + Latitude uint64 + LongitudeResolution uint8 + Longitude uint64 + AltitudeType uint8 + AltitudeResolution uint16 + Altitude uint32 + Datum uint8 +} + +type LLDPLocationAddressLine struct { + Type LLDPLocationAddressType + Value string +} + +type LLDPLocationAddress struct { + What LLDPLocationAddressWhat + CountryCode string + AddressLines []LLDPLocationAddressLine +} + +type LLDPLocationECS struct { + ELIN string +} + +// LLDP represents a physical location. +// Only one of the embedded types will contain values, depending on Format. 
+type LLDPLocation struct { + Format LLDPLocationFormat + Coordinate LLDPLocationCoordinate + Address LLDPLocationAddress + ECS LLDPLocationECS +} + +type LLDPPowerViaMDI struct { + Type LLDPPowerType + Source LLDPPowerSource + Priority LLDPPowerPriority + Value uint16 +} + +// LLDPInfoMedia represents the information carried in TR-41 Org-specific TLVs +type LLDPInfoMedia struct { + MediaCapabilities LLDPMediaCapabilities + NetworkPolicy LLDPNetworkPolicy + Location LLDPLocation + PowerViaMDI LLDPPowerViaMDI + HardwareRevision string + FirmwareRevision string + SoftwareRevision string + SerialNumber string + Manufacturer string + Model string + AssetID string +} + +type LLDPCisco2Subtype uint8 + +// Cisco2 TLV Subtypes +const ( + LLDPCisco2PowerViaMDI LLDPCisco2Subtype = 1 +) + +const ( + LLDPCiscoPSESupport uint8 = 1 << 0 + LLDPCiscoArchShared uint8 = 1 << 1 + LLDPCiscoPDSparePair uint8 = 1 << 2 + LLDPCiscoPSESparePair uint8 = 1 << 3 +) + +// LLDPInfoCisco2 represents the information carried in Cisco Org-specific TLVs +type LLDPInfoCisco2 struct { + PSEFourWirePoESupported bool + PDSparePairArchitectureShared bool + PDRequestSparePairPoEOn bool + PSESparePairPoEOn bool +} + +// Profinet Subtypes +type LLDPProfinetSubtype uint8 + +const ( + LLDPProfinetPNIODelay LLDPProfinetSubtype = 1 + LLDPProfinetPNIOPortStatus LLDPProfinetSubtype = 2 + LLDPProfinetPNIOMRPPortStatus LLDPProfinetSubtype = 4 + LLDPProfinetPNIOChassisMAC LLDPProfinetSubtype = 5 + LLDPProfinetPNIOPTCPStatus LLDPProfinetSubtype = 6 +) + +type LLDPPNIODelay struct { + RXLocal uint32 + RXRemote uint32 + TXLocal uint32 + TXRemote uint32 + CableLocal uint32 +} + +type LLDPPNIOPortStatus struct { + Class2 uint16 + Class3 uint16 +} + +type LLDPPNIOMRPPortStatus struct { + UUID []byte + Status uint16 +} + +type LLDPPNIOPTCPStatus struct { + MasterAddress []byte + SubdomainUUID []byte + IRDataUUID []byte + PeriodValid bool + PeriodLength uint32 + RedPeriodValid bool + RedPeriodBegin uint32 + OrangePeriodValid bool + OrangePeriodBegin uint32 + GreenPeriodValid bool + GreenPeriodBegin uint32 +} + +// LLDPInfoProfinet represents the information carried in Profinet Org-specific TLVs +type LLDPInfoProfinet struct { + PNIODelay LLDPPNIODelay + PNIOPortStatus LLDPPNIOPortStatus + PNIOMRPPortStatus LLDPPNIOMRPPortStatus + ChassisMAC []byte + PNIOPTCPStatus LLDPPNIOPTCPStatus +} + +// LayerType returns gopacket.LayerTypeLinkLayerDiscovery. +func (c *LinkLayerDiscovery) LayerType() gopacket.LayerType { + return LayerTypeLinkLayerDiscovery +} + +// SerializeTo serializes LLDP packet to bytes and writes on SerializeBuffer. 
+func (c *LinkLayerDiscovery) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + chassIDLen := c.ChassisID.serializedLen() + portIDLen := c.PortID.serializedLen() + vb, err := b.AppendBytes(chassIDLen + portIDLen + 4) // +4 for TTL + if err != nil { + return err + } + copy(vb[:chassIDLen], c.ChassisID.serialize()) + copy(vb[chassIDLen:], c.PortID.serialize()) + ttlIDLen := uint16(LLDPTLVTTL)<<9 | uint16(2) + binary.BigEndian.PutUint16(vb[chassIDLen+portIDLen:], ttlIDLen) + binary.BigEndian.PutUint16(vb[chassIDLen+portIDLen+2:], c.TTL) + + for _, v := range c.Values { + vb, err := b.AppendBytes(int(v.Length) + 2) // +2 for TLV type and length; 1 byte for subtype is included in v.Value + if err != nil { + return err + } + idLen := ((uint16(v.Type) << 9) | v.Length) + binary.BigEndian.PutUint16(vb[0:2], idLen) + copy(vb[2:], v.Value) + } + + vb, err = b.AppendBytes(2) // End Tlv, 2 bytes + if err != nil { + return err + } + binary.BigEndian.PutUint16(vb[len(vb)-2:], uint16(0)) //End tlv, 2 bytes, all zero + return nil + +} + +func decodeLinkLayerDiscovery(data []byte, p gopacket.PacketBuilder) error { + var vals []LinkLayerDiscoveryValue + vData := data[0:] + for len(vData) > 0 { + if len(vData) < 2 { + p.SetTruncated() + return errors.New("LLDP vdata < 2 bytes") + } + nbit := vData[0] & 0x01 + t := LLDPTLVType(vData[0] >> 1) + val := LinkLayerDiscoveryValue{Type: t, Length: uint16(nbit)<<8 + uint16(vData[1])} + if val.Length > 0 { + if len(vData) < int(val.Length+2) { + p.SetTruncated() + return fmt.Errorf("LLDP VData < %d bytes", val.Length+2) + } + val.Value = vData[2 : val.Length+2] + } + vals = append(vals, val) + if t == LLDPTLVEnd { + break + } + if len(vData) < int(2+val.Length) { + return errors.New("Malformed LinkLayerDiscovery Header") + } + vData = vData[2+val.Length:] + } + if len(vals) < 4 { + return errors.New("Missing mandatory LinkLayerDiscovery TLV") + } + c := &LinkLayerDiscovery{} + gotEnd := false + for _, v := range vals { + switch v.Type { + case LLDPTLVEnd: + gotEnd = true + case LLDPTLVChassisID: + if len(v.Value) < 2 { + return errors.New("Malformed LinkLayerDiscovery ChassisID TLV") + } + c.ChassisID.Subtype = LLDPChassisIDSubType(v.Value[0]) + c.ChassisID.ID = v.Value[1:] + case LLDPTLVPortID: + if len(v.Value) < 2 { + return errors.New("Malformed LinkLayerDiscovery PortID TLV") + } + c.PortID.Subtype = LLDPPortIDSubType(v.Value[0]) + c.PortID.ID = v.Value[1:] + case LLDPTLVTTL: + if len(v.Value) < 2 { + return errors.New("Malformed LinkLayerDiscovery TTL TLV") + } + c.TTL = binary.BigEndian.Uint16(v.Value[0:2]) + default: + c.Values = append(c.Values, v) + } + } + if c.ChassisID.Subtype == 0 || c.PortID.Subtype == 0 || !gotEnd { + return errors.New("Missing mandatory LinkLayerDiscovery TLV") + } + c.Contents = data + p.AddLayer(c) + + info := &LinkLayerDiscoveryInfo{} + p.AddLayer(info) + for _, v := range c.Values { + switch v.Type { + case LLDPTLVPortDescription: + info.PortDescription = string(v.Value) + case LLDPTLVSysName: + info.SysName = string(v.Value) + case LLDPTLVSysDescription: + info.SysDescription = string(v.Value) + case LLDPTLVSysCapabilities: + if err := checkLLDPTLVLen(v, 4); err != nil { + return err + } + info.SysCapabilities.SystemCap = getCapabilities(binary.BigEndian.Uint16(v.Value[0:2])) + info.SysCapabilities.EnabledCap = getCapabilities(binary.BigEndian.Uint16(v.Value[2:4])) + case LLDPTLVMgmtAddress: + if err := checkLLDPTLVLen(v, 9); err != nil { + return err + } + mlen := v.Value[0] + if err := 
checkLLDPTLVLen(v, int(mlen+7)); err != nil { + return err + } + info.MgmtAddress.Subtype = IANAAddressFamily(v.Value[1]) + info.MgmtAddress.Address = v.Value[2 : mlen+1] + info.MgmtAddress.InterfaceSubtype = LLDPInterfaceSubtype(v.Value[mlen+1]) + info.MgmtAddress.InterfaceNumber = binary.BigEndian.Uint32(v.Value[mlen+2 : mlen+6]) + olen := v.Value[mlen+6] + if err := checkLLDPTLVLen(v, int(mlen+7+olen)); err != nil { + return err + } + info.MgmtAddress.OID = string(v.Value[mlen+7 : mlen+7+olen]) + case LLDPTLVOrgSpecific: + if err := checkLLDPTLVLen(v, 4); err != nil { + return err + } + info.OrgTLVs = append(info.OrgTLVs, LLDPOrgSpecificTLV{IEEEOUI(binary.BigEndian.Uint32(append([]byte{byte(0)}, v.Value[0:3]...))), uint8(v.Value[3]), v.Value[4:]}) + } + } + return nil +} + +func (l *LinkLayerDiscoveryInfo) Decode8021() (info LLDPInfo8021, err error) { + for _, o := range l.OrgTLVs { + if o.OUI != IEEEOUI8021 { + continue + } + switch o.SubType { + case LLDP8021SubtypePortVLANID: + if err = checkLLDPOrgSpecificLen(o, 2); err != nil { + return + } + info.PVID = binary.BigEndian.Uint16(o.Info[0:2]) + case LLDP8021SubtypeProtocolVLANID: + if err = checkLLDPOrgSpecificLen(o, 3); err != nil { + return + } + sup := (o.Info[0]&LLDPProtocolVLANIDCapability > 0) + en := (o.Info[0]&LLDPProtocolVLANIDStatus > 0) + id := binary.BigEndian.Uint16(o.Info[1:3]) + info.PPVIDs = append(info.PPVIDs, PortProtocolVLANID{sup, en, id}) + case LLDP8021SubtypeVLANName: + if err = checkLLDPOrgSpecificLen(o, 2); err != nil { + return + } + id := binary.BigEndian.Uint16(o.Info[0:2]) + info.VLANNames = append(info.VLANNames, VLANName{id, string(o.Info[3:])}) + case LLDP8021SubtypeProtocolIdentity: + if err = checkLLDPOrgSpecificLen(o, 1); err != nil { + return + } + l := int(o.Info[0]) + if l > 0 { + info.ProtocolIdentities = append(info.ProtocolIdentities, o.Info[1:1+l]) + } + case LLDP8021SubtypeVDIUsageDigest: + if err = checkLLDPOrgSpecificLen(o, 4); err != nil { + return + } + info.VIDUsageDigest = binary.BigEndian.Uint32(o.Info[0:4]) + case LLDP8021SubtypeManagementVID: + if err = checkLLDPOrgSpecificLen(o, 2); err != nil { + return + } + info.ManagementVID = binary.BigEndian.Uint16(o.Info[0:2]) + case LLDP8021SubtypeLinkAggregation: + if err = checkLLDPOrgSpecificLen(o, 5); err != nil { + return + } + sup := (o.Info[0]&LLDPAggregationCapability > 0) + en := (o.Info[0]&LLDPAggregationStatus > 0) + info.LinkAggregation = LLDPLinkAggregation{sup, en, binary.BigEndian.Uint32(o.Info[1:5])} + } + } + return +} + +func (l *LinkLayerDiscoveryInfo) Decode8023() (info LLDPInfo8023, err error) { + for _, o := range l.OrgTLVs { + if o.OUI != IEEEOUI8023 { + continue + } + switch o.SubType { + case LLDP8023SubtypeMACPHY: + if err = checkLLDPOrgSpecificLen(o, 5); err != nil { + return + } + sup := (o.Info[0]&LLDPMACPHYCapability > 0) + en := (o.Info[0]&LLDPMACPHYStatus > 0) + ca := binary.BigEndian.Uint16(o.Info[1:3]) + mau := binary.BigEndian.Uint16(o.Info[3:5]) + info.MACPHYConfigStatus = LLDPMACPHYConfigStatus{sup, en, ca, mau} + case LLDP8023SubtypeMDIPower: + if err = checkLLDPOrgSpecificLen(o, 3); err != nil { + return + } + info.PowerViaMDI.PortClassPSE = (o.Info[0]&LLDPMDIPowerPortClass > 0) + info.PowerViaMDI.PSESupported = (o.Info[0]&LLDPMDIPowerCapability > 0) + info.PowerViaMDI.PSEEnabled = (o.Info[0]&LLDPMDIPowerStatus > 0) + info.PowerViaMDI.PSEPairsAbility = (o.Info[0]&LLDPMDIPowerPairsAbility > 0) + info.PowerViaMDI.PSEPowerPair = uint8(o.Info[1]) + info.PowerViaMDI.PSEClass = uint8(o.Info[2]) + if 
len(o.Info) >= 7 { + info.PowerViaMDI.Type = LLDPPowerType((o.Info[3] & 0xc0) >> 6) + info.PowerViaMDI.Source = LLDPPowerSource((o.Info[3] & 0x30) >> 4) + if info.PowerViaMDI.Type == 1 || info.PowerViaMDI.Type == 3 { + info.PowerViaMDI.Source += 128 // For Stringify purposes + } + info.PowerViaMDI.Priority = LLDPPowerPriority(o.Info[3] & 0x0f) + info.PowerViaMDI.Requested = binary.BigEndian.Uint16(o.Info[4:6]) + info.PowerViaMDI.Allocated = binary.BigEndian.Uint16(o.Info[6:8]) + } + case LLDP8023SubtypeLinkAggregation: + if err = checkLLDPOrgSpecificLen(o, 5); err != nil { + return + } + sup := (o.Info[0]&LLDPAggregationCapability > 0) + en := (o.Info[0]&LLDPAggregationStatus > 0) + info.LinkAggregation = LLDPLinkAggregation{sup, en, binary.BigEndian.Uint32(o.Info[1:5])} + case LLDP8023SubtypeMTU: + if err = checkLLDPOrgSpecificLen(o, 2); err != nil { + return + } + info.MTU = binary.BigEndian.Uint16(o.Info[0:2]) + } + } + return +} + +func (l *LinkLayerDiscoveryInfo) Decode8021Qbg() (info LLDPInfo8021Qbg, err error) { + for _, o := range l.OrgTLVs { + if o.OUI != IEEEOUI8021Qbg { + continue + } + switch o.SubType { + case LLDP8021QbgEVB: + if err = checkLLDPOrgSpecificLen(o, 9); err != nil { + return + } + info.EVBSettings.Supported = getEVBCapabilities(binary.BigEndian.Uint16(o.Info[0:2])) + info.EVBSettings.Enabled = getEVBCapabilities(binary.BigEndian.Uint16(o.Info[2:4])) + info.EVBSettings.SupportedVSIs = binary.BigEndian.Uint16(o.Info[4:6]) + info.EVBSettings.ConfiguredVSIs = binary.BigEndian.Uint16(o.Info[6:8]) + info.EVBSettings.RTEExponent = uint8(o.Info[8]) + } + } + return +} + +func (l *LinkLayerDiscoveryInfo) DecodeMedia() (info LLDPInfoMedia, err error) { + for _, o := range l.OrgTLVs { + if o.OUI != IEEEOUIMedia { + continue + } + switch LLDPMediaSubtype(o.SubType) { + case LLDPMediaTypeCapabilities: + if err = checkLLDPOrgSpecificLen(o, 3); err != nil { + return + } + b := binary.BigEndian.Uint16(o.Info[0:2]) + info.MediaCapabilities.Capabilities = (b & LLDPMediaCapsLLDP) > 0 + info.MediaCapabilities.NetworkPolicy = (b & LLDPMediaCapsNetwork) > 0 + info.MediaCapabilities.Location = (b & LLDPMediaCapsLocation) > 0 + info.MediaCapabilities.PowerPSE = (b & LLDPMediaCapsPowerPSE) > 0 + info.MediaCapabilities.PowerPD = (b & LLDPMediaCapsPowerPD) > 0 + info.MediaCapabilities.Inventory = (b & LLDPMediaCapsInventory) > 0 + info.MediaCapabilities.Class = LLDPMediaClass(o.Info[2]) + case LLDPMediaTypeNetwork: + if err = checkLLDPOrgSpecificLen(o, 4); err != nil { + return + } + info.NetworkPolicy.ApplicationType = LLDPApplicationType(o.Info[0]) + b := binary.BigEndian.Uint16(o.Info[1:3]) + info.NetworkPolicy.Defined = (b & 0x8000) == 0 + info.NetworkPolicy.Tagged = (b & 0x4000) > 0 + info.NetworkPolicy.VLANId = (b & 0x1ffe) >> 1 + b = binary.BigEndian.Uint16(o.Info[2:4]) + info.NetworkPolicy.L2Priority = (b & 0x01c0) >> 6 + info.NetworkPolicy.DSCPValue = uint8(o.Info[3] & 0x3f) + case LLDPMediaTypeLocation: + if err = checkLLDPOrgSpecificLen(o, 1); err != nil { + return + } + info.Location.Format = LLDPLocationFormat(o.Info[0]) + o.Info = o.Info[1:] + switch info.Location.Format { + case LLDPLocationFormatCoordinate: + if err = checkLLDPOrgSpecificLen(o, 16); err != nil { + return + } + info.Location.Coordinate.LatitudeResolution = uint8(o.Info[0]&0xfc) >> 2 + b := binary.BigEndian.Uint64(o.Info[0:8]) + info.Location.Coordinate.Latitude = (b & 0x03ffffffff000000) >> 24 + info.Location.Coordinate.LongitudeResolution = uint8(o.Info[5]&0xfc) >> 2 + b = 
binary.BigEndian.Uint64(o.Info[5:13]) + info.Location.Coordinate.Longitude = (b & 0x03ffffffff000000) >> 24 + info.Location.Coordinate.AltitudeType = uint8((o.Info[10] & 0x30) >> 4) + b1 := binary.BigEndian.Uint16(o.Info[10:12]) + info.Location.Coordinate.AltitudeResolution = (b1 & 0xfc0) >> 6 + b2 := binary.BigEndian.Uint32(o.Info[11:15]) + info.Location.Coordinate.Altitude = b2 & 0x3fffffff + info.Location.Coordinate.Datum = uint8(o.Info[15]) + case LLDPLocationFormatAddress: + if err = checkLLDPOrgSpecificLen(o, 3); err != nil { + return + } + //ll := uint8(o.Info[0]) + info.Location.Address.What = LLDPLocationAddressWhat(o.Info[1]) + info.Location.Address.CountryCode = string(o.Info[2:4]) + data := o.Info[4:] + for len(data) > 1 { + aType := LLDPLocationAddressType(data[0]) + aLen := int(data[1]) + if len(data) >= aLen+2 { + info.Location.Address.AddressLines = append(info.Location.Address.AddressLines, LLDPLocationAddressLine{aType, string(data[2 : aLen+2])}) + data = data[aLen+2:] + } else { + break + } + } + case LLDPLocationFormatECS: + info.Location.ECS.ELIN = string(o.Info) + } + case LLDPMediaTypePower: + if err = checkLLDPOrgSpecificLen(o, 3); err != nil { + return + } + info.PowerViaMDI.Type = LLDPPowerType((o.Info[0] & 0xc0) >> 6) + info.PowerViaMDI.Source = LLDPPowerSource((o.Info[0] & 0x30) >> 4) + if info.PowerViaMDI.Type == 1 || info.PowerViaMDI.Type == 3 { + info.PowerViaMDI.Source += 128 // For Stringify purposes + } + info.PowerViaMDI.Priority = LLDPPowerPriority(o.Info[0] & 0x0f) + info.PowerViaMDI.Value = binary.BigEndian.Uint16(o.Info[1:3]) * 100 // 0 to 102.3 w, 0.1W increments + case LLDPMediaTypeHardware: + info.HardwareRevision = string(o.Info) + case LLDPMediaTypeFirmware: + info.FirmwareRevision = string(o.Info) + case LLDPMediaTypeSoftware: + info.SoftwareRevision = string(o.Info) + case LLDPMediaTypeSerial: + info.SerialNumber = string(o.Info) + case LLDPMediaTypeManufacturer: + info.Manufacturer = string(o.Info) + case LLDPMediaTypeModel: + info.Model = string(o.Info) + case LLDPMediaTypeAssetID: + info.AssetID = string(o.Info) + } + } + return +} + +func (l *LinkLayerDiscoveryInfo) DecodeCisco2() (info LLDPInfoCisco2, err error) { + for _, o := range l.OrgTLVs { + if o.OUI != IEEEOUICisco2 { + continue + } + switch LLDPCisco2Subtype(o.SubType) { + case LLDPCisco2PowerViaMDI: + if err = checkLLDPOrgSpecificLen(o, 1); err != nil { + return + } + info.PSEFourWirePoESupported = (o.Info[0] & LLDPCiscoPSESupport) > 0 + info.PDSparePairArchitectureShared = (o.Info[0] & LLDPCiscoArchShared) > 0 + info.PDRequestSparePairPoEOn = (o.Info[0] & LLDPCiscoPDSparePair) > 0 + info.PSESparePairPoEOn = (o.Info[0] & LLDPCiscoPSESparePair) > 0 + } + } + return +} + +func (l *LinkLayerDiscoveryInfo) DecodeProfinet() (info LLDPInfoProfinet, err error) { + for _, o := range l.OrgTLVs { + if o.OUI != IEEEOUIProfinet { + continue + } + switch LLDPProfinetSubtype(o.SubType) { + case LLDPProfinetPNIODelay: + if err = checkLLDPOrgSpecificLen(o, 20); err != nil { + return + } + info.PNIODelay.RXLocal = binary.BigEndian.Uint32(o.Info[0:4]) + info.PNIODelay.RXRemote = binary.BigEndian.Uint32(o.Info[4:8]) + info.PNIODelay.TXLocal = binary.BigEndian.Uint32(o.Info[8:12]) + info.PNIODelay.TXRemote = binary.BigEndian.Uint32(o.Info[12:16]) + info.PNIODelay.CableLocal = binary.BigEndian.Uint32(o.Info[16:20]) + case LLDPProfinetPNIOPortStatus: + if err = checkLLDPOrgSpecificLen(o, 4); err != nil { + return + } + info.PNIOPortStatus.Class2 = binary.BigEndian.Uint16(o.Info[0:2]) + 
info.PNIOPortStatus.Class3 = binary.BigEndian.Uint16(o.Info[2:4]) + case LLDPProfinetPNIOMRPPortStatus: + if err = checkLLDPOrgSpecificLen(o, 18); err != nil { + return + } + info.PNIOMRPPortStatus.UUID = o.Info[0:16] + info.PNIOMRPPortStatus.Status = binary.BigEndian.Uint16(o.Info[16:18]) + case LLDPProfinetPNIOChassisMAC: + if err = checkLLDPOrgSpecificLen(o, 6); err != nil { + return + } + info.ChassisMAC = o.Info[0:6] + case LLDPProfinetPNIOPTCPStatus: + if err = checkLLDPOrgSpecificLen(o, 54); err != nil { + return + } + info.PNIOPTCPStatus.MasterAddress = o.Info[0:6] + info.PNIOPTCPStatus.SubdomainUUID = o.Info[6:22] + info.PNIOPTCPStatus.IRDataUUID = o.Info[22:38] + b := binary.BigEndian.Uint32(o.Info[38:42]) + info.PNIOPTCPStatus.PeriodValid = (b & 0x80000000) > 0 + info.PNIOPTCPStatus.PeriodLength = b & 0x7fffffff + b = binary.BigEndian.Uint32(o.Info[42:46]) + info.PNIOPTCPStatus.RedPeriodValid = (b & 0x80000000) > 0 + info.PNIOPTCPStatus.RedPeriodBegin = b & 0x7fffffff + b = binary.BigEndian.Uint32(o.Info[46:50]) + info.PNIOPTCPStatus.OrangePeriodValid = (b & 0x80000000) > 0 + info.PNIOPTCPStatus.OrangePeriodBegin = b & 0x7fffffff + b = binary.BigEndian.Uint32(o.Info[50:54]) + info.PNIOPTCPStatus.GreenPeriodValid = (b & 0x80000000) > 0 + info.PNIOPTCPStatus.GreenPeriodBegin = b & 0x7fffffff + } + } + return +} + +// LayerType returns gopacket.LayerTypeLinkLayerDiscoveryInfo. +func (c *LinkLayerDiscoveryInfo) LayerType() gopacket.LayerType { + return LayerTypeLinkLayerDiscoveryInfo +} + +func getCapabilities(v uint16) (c LLDPCapabilities) { + c.Other = (v&LLDPCapsOther > 0) + c.Repeater = (v&LLDPCapsRepeater > 0) + c.Bridge = (v&LLDPCapsBridge > 0) + c.WLANAP = (v&LLDPCapsWLANAP > 0) + c.Router = (v&LLDPCapsRouter > 0) + c.Phone = (v&LLDPCapsPhone > 0) + c.DocSis = (v&LLDPCapsDocSis > 0) + c.StationOnly = (v&LLDPCapsStationOnly > 0) + c.CVLAN = (v&LLDPCapsCVLAN > 0) + c.SVLAN = (v&LLDPCapsSVLAN > 0) + c.TMPR = (v&LLDPCapsTmpr > 0) + return +} + +func getEVBCapabilities(v uint16) (c LLDPEVBCapabilities) { + c.StandardBridging = (v & LLDPEVBCapsSTD) > 0 + c.StandardBridging = (v & LLDPEVBCapsSTD) > 0 + c.ReflectiveRelay = (v & LLDPEVBCapsRR) > 0 + c.RetransmissionTimerExponent = (v & LLDPEVBCapsRTE) > 0 + c.EdgeControlProtocol = (v & LLDPEVBCapsECP) > 0 + c.VSIDiscoveryProtocol = (v & LLDPEVBCapsVDP) > 0 + return +} + +func (t LLDPTLVType) String() (s string) { + switch t { + case LLDPTLVEnd: + s = "TLV End" + case LLDPTLVChassisID: + s = "Chassis ID" + case LLDPTLVPortID: + s = "Port ID" + case LLDPTLVTTL: + s = "TTL" + case LLDPTLVPortDescription: + s = "Port Description" + case LLDPTLVSysName: + s = "System Name" + case LLDPTLVSysDescription: + s = "System Description" + case LLDPTLVSysCapabilities: + s = "System Capabilities" + case LLDPTLVMgmtAddress: + s = "Management Address" + case LLDPTLVOrgSpecific: + s = "Organisation Specific" + default: + s = "Unknown" + } + return +} + +func (t LLDPChassisIDSubType) String() (s string) { + switch t { + case LLDPChassisIDSubTypeReserved: + s = "Reserved" + case LLDPChassisIDSubTypeChassisComp: + s = "Chassis Component" + case LLDPChassisIDSubtypeIfaceAlias: + s = "Interface Alias" + case LLDPChassisIDSubTypePortComp: + s = "Port Component" + case LLDPChassisIDSubTypeMACAddr: + s = "MAC Address" + case LLDPChassisIDSubTypeNetworkAddr: + s = "Network Address" + case LLDPChassisIDSubtypeIfaceName: + s = "Interface Name" + case LLDPChassisIDSubTypeLocal: + s = "Local" + default: + s = "Unknown" + } + return +} + +func (t LLDPPortIDSubType) 
String() (s string) { + switch t { + case LLDPPortIDSubtypeReserved: + s = "Reserved" + case LLDPPortIDSubtypeIfaceAlias: + s = "Interface Alias" + case LLDPPortIDSubtypePortComp: + s = "Port Component" + case LLDPPortIDSubtypeMACAddr: + s = "MAC Address" + case LLDPPortIDSubtypeNetworkAddr: + s = "Network Address" + case LLDPPortIDSubtypeIfaceName: + s = "Interface Name" + case LLDPPortIDSubtypeAgentCircuitID: + s = "Agent Circuit ID" + case LLDPPortIDSubtypeLocal: + s = "Local" + default: + s = "Unknown" + } + return +} + +func (t IANAAddressFamily) String() (s string) { + switch t { + case IANAAddressFamilyReserved: + s = "Reserved" + case IANAAddressFamilyIPV4: + s = "IPv4" + case IANAAddressFamilyIPV6: + s = "IPv6" + case IANAAddressFamilyNSAP: + s = "NSAP" + case IANAAddressFamilyHDLC: + s = "HDLC" + case IANAAddressFamilyBBN1822: + s = "BBN 1822" + case IANAAddressFamily802: + s = "802 media plus Ethernet 'canonical format'" + case IANAAddressFamilyE163: + s = "E.163" + case IANAAddressFamilyE164: + s = "E.164 (SMDS, Frame Relay, ATM)" + case IANAAddressFamilyF69: + s = "F.69 (Telex)" + case IANAAddressFamilyX121: + s = "X.121, X.25, Frame Relay" + case IANAAddressFamilyIPX: + s = "IPX" + case IANAAddressFamilyAtalk: + s = "Appletalk" + case IANAAddressFamilyDecnet: + s = "Decnet IV" + case IANAAddressFamilyBanyan: + s = "Banyan Vines" + case IANAAddressFamilyE164NSAP: + s = "E.164 with NSAP format subaddress" + case IANAAddressFamilyDNS: + s = "DNS" + case IANAAddressFamilyDistname: + s = "Distinguished Name" + case IANAAddressFamilyASNumber: + s = "AS Number" + case IANAAddressFamilyXTPIPV4: + s = "XTP over IP version 4" + case IANAAddressFamilyXTPIPV6: + s = "XTP over IP version 6" + case IANAAddressFamilyXTP: + s = "XTP native mode XTP" + case IANAAddressFamilyFcWWPN: + s = "Fibre Channel World-Wide Port Name" + case IANAAddressFamilyFcWWNN: + s = "Fibre Channel World-Wide Node Name" + case IANAAddressFamilyGWID: + s = "GWID" + case IANAAddressFamilyL2VPN: + s = "AFI for Layer 2 VPN" + default: + s = "Unknown" + } + return +} + +func (t LLDPInterfaceSubtype) String() (s string) { + switch t { + case LLDPInterfaceSubtypeUnknown: + s = "Unknown" + case LLDPInterfaceSubtypeifIndex: + s = "IfIndex" + case LLDPInterfaceSubtypeSysPort: + s = "System Port Number" + default: + s = "Unknown" + } + return +} + +func (t LLDPPowerType) String() (s string) { + switch t { + case 0: + s = "Type 2 PSE Device" + case 1: + s = "Type 2 PD Device" + case 2: + s = "Type 1 PSE Device" + case 3: + s = "Type 1 PD Device" + default: + s = "Unknown" + } + return +} + +func (t LLDPPowerSource) String() (s string) { + switch t { + // PD Device + case 0: + s = "Unknown" + case 1: + s = "PSE" + case 2: + s = "Local" + case 3: + s = "PSE and Local" + // PSE Device (Actual value + 128) + case 128: + s = "Unknown" + case 129: + s = "Primary Power Source" + case 130: + s = "Backup Power Source" + default: + s = "Unknown" + } + return +} + +func (t LLDPPowerPriority) String() (s string) { + switch t { + case 0: + s = "Unknown" + case 1: + s = "Critical" + case 2: + s = "High" + case 3: + s = "Low" + default: + s = "Unknown" + } + return +} + +func (t LLDPMediaSubtype) String() (s string) { + switch t { + case LLDPMediaTypeCapabilities: + s = "Media Capabilities " + case LLDPMediaTypeNetwork: + s = "Network Policy" + case LLDPMediaTypeLocation: + s = "Location Identification" + case LLDPMediaTypePower: + s = "Extended Power-via-MDI" + case LLDPMediaTypeHardware: + s = "Hardware Revision" + case 
LLDPMediaTypeFirmware: + s = "Firmware Revision" + case LLDPMediaTypeSoftware: + s = "Software Revision" + case LLDPMediaTypeSerial: + s = "Serial Number" + case LLDPMediaTypeManufacturer: + s = "Manufacturer" + case LLDPMediaTypeModel: + s = "Model" + case LLDPMediaTypeAssetID: + s = "Asset ID" + default: + s = "Unknown" + } + return +} + +func (t LLDPMediaClass) String() (s string) { + switch t { + case LLDPMediaClassUndefined: + s = "Undefined" + case LLDPMediaClassEndpointI: + s = "Endpoint Class I" + case LLDPMediaClassEndpointII: + s = "Endpoint Class II" + case LLDPMediaClassEndpointIII: + s = "Endpoint Class III" + case LLDPMediaClassNetwork: + s = "Network connectivity " + default: + s = "Unknown" + } + return +} + +func (t LLDPApplicationType) String() (s string) { + switch t { + case LLDPAppTypeReserved: + s = "Reserved" + case LLDPAppTypeVoice: + s = "Voice" + case LLDPappTypeVoiceSignaling: + s = "Voice Signaling" + case LLDPappTypeGuestVoice: + s = "Guest Voice" + case LLDPappTypeGuestVoiceSignaling: + s = "Guest Voice Signaling" + case LLDPappTypeSoftphoneVoice: + s = "Softphone Voice" + case LLDPappTypeVideoConferencing: + s = "Video Conferencing" + case LLDPappTypeStreamingVideo: + s = "Streaming Video" + case LLDPappTypeVideoSignaling: + s = "Video Signaling" + default: + s = "Unknown" + } + return +} + +func (t LLDPLocationFormat) String() (s string) { + switch t { + case LLDPLocationFormatInvalid: + s = "Invalid" + case LLDPLocationFormatCoordinate: + s = "Coordinate-based LCI" + case LLDPLocationFormatAddress: + s = "Address-based LCO" + case LLDPLocationFormatECS: + s = "ECS ELIN" + default: + s = "Unknown" + } + return +} + +func (t LLDPLocationAddressType) String() (s string) { + switch t { + case LLDPLocationAddressTypeLanguage: + s = "Language" + case LLDPLocationAddressTypeNational: + s = "National subdivisions (province, state, etc)" + case LLDPLocationAddressTypeCounty: + s = "County, parish, district" + case LLDPLocationAddressTypeCity: + s = "City, township" + case LLDPLocationAddressTypeCityDivision: + s = "City division, borough, ward" + case LLDPLocationAddressTypeNeighborhood: + s = "Neighborhood, block" + case LLDPLocationAddressTypeStreet: + s = "Street" + case LLDPLocationAddressTypeLeadingStreet: + s = "Leading street direction" + case LLDPLocationAddressTypeTrailingStreet: + s = "Trailing street suffix" + case LLDPLocationAddressTypeStreetSuffix: + s = "Street suffix" + case LLDPLocationAddressTypeHouseNum: + s = "House number" + case LLDPLocationAddressTypeHouseSuffix: + s = "House number suffix" + case LLDPLocationAddressTypeLandmark: + s = "Landmark or vanity address" + case LLDPLocationAddressTypeAdditional: + s = "Additional location information" + case LLDPLocationAddressTypeName: + s = "Name" + case LLDPLocationAddressTypePostal: + s = "Postal/ZIP code" + case LLDPLocationAddressTypeBuilding: + s = "Building" + case LLDPLocationAddressTypeUnit: + s = "Unit" + case LLDPLocationAddressTypeFloor: + s = "Floor" + case LLDPLocationAddressTypeRoom: + s = "Room number" + case LLDPLocationAddressTypePlace: + s = "Place type" + case LLDPLocationAddressTypeScript: + s = "Script" + default: + s = "Unknown" + } + return +} + +func checkLLDPTLVLen(v LinkLayerDiscoveryValue, l int) (err error) { + if len(v.Value) < l { + err = fmt.Errorf("Invalid TLV %v length %d (wanted mimimum %v", v.Type, len(v.Value), l) + } + return +} + +func checkLLDPOrgSpecificLen(o LLDPOrgSpecificTLV, l int) (err error) { + if len(o.Info) < l { + err = fmt.Errorf("Invalid Org 
Specific TLV %v length %d (wanted minimum %v)", o.SubType, len(o.Info), l) + } + return +} diff --git a/vendor/github.com/google/gopacket/layers/loopback.go b/vendor/github.com/google/gopacket/layers/loopback.go new file mode 100644 index 0000000000..839f760739 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/loopback.go @@ -0,0 +1,80 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "errors" + "fmt" + + "github.com/google/gopacket" +) + +// Loopback contains the header for loopback encapsulation. This header is +// used by both BSD and OpenBSD style loopback decoding (pcap's DLT_NULL +// and DLT_LOOP, respectively). +type Loopback struct { + BaseLayer + Family ProtocolFamily +} + +// LayerType returns LayerTypeLoopback. +func (l *Loopback) LayerType() gopacket.LayerType { return LayerTypeLoopback } + +// DecodeFromBytes decodes the given bytes into this layer. +func (l *Loopback) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 4 { + return errors.New("Loopback packet too small") + } + + // The protocol could be either big-endian or little-endian, we're + // not sure. But we're PRETTY sure that the value is less than + // 256, so we can check the first two bytes. + var prot uint32 + if data[0] == 0 && data[1] == 0 { + prot = binary.BigEndian.Uint32(data[:4]) + } else { + prot = binary.LittleEndian.Uint32(data[:4]) + } + if prot > 0xFF { + return fmt.Errorf("Invalid loopback protocol %q", data[:4]) + } + + l.Family = ProtocolFamily(prot) + l.BaseLayer = BaseLayer{data[:4], data[4:]} + return nil +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (l *Loopback) CanDecode() gopacket.LayerClass { + return LayerTypeLoopback +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (l *Loopback) NextLayerType() gopacket.LayerType { + return l.Family.LayerType() +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +func (l *Loopback) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + bytes, err := b.PrependBytes(4) + if err != nil { + return err + } + binary.LittleEndian.PutUint32(bytes, uint32(l.Family)) + return nil +} + +func decodeLoopback(data []byte, p gopacket.PacketBuilder) error { + l := Loopback{} + if err := l.DecodeFromBytes(data, gopacket.NilDecodeFeedback); err != nil { + return err + } + p.AddLayer(&l) + return p.NextDecoder(l.Family) +} diff --git a/vendor/github.com/google/gopacket/layers/mldv1.go b/vendor/github.com/google/gopacket/layers/mldv1.go new file mode 100644 index 0000000000..e1bb1dc00f --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/mldv1.go @@ -0,0 +1,182 @@ +// Copyright 2018 GoPacket Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "errors" + "fmt" + "math" + "net" + "time" + + "github.com/google/gopacket" +) + +// MLDv1Message represents the common structure of all MLDv1 messages +type MLDv1Message struct { + BaseLayer + // 3.4. Maximum Response Delay + MaximumResponseDelay time.Duration + // 3.6. 
Multicast Address + // Zero in general query + // Specific IPv6 multicast address otherwise + MulticastAddress net.IP +} + +// DecodeFromBytes decodes the given bytes into this layer. +func (m *MLDv1Message) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 20 { + df.SetTruncated() + return errors.New("ICMP layer less than 20 bytes for Multicast Listener Query Message V1") + } + + m.MaximumResponseDelay = time.Duration(binary.BigEndian.Uint16(data[0:2])) * time.Millisecond + // data[2:4] is reserved and not used in mldv1 + m.MulticastAddress = data[4:20] + + return nil +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (*MLDv1Message) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypeZero +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (m *MLDv1Message) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + buf, err := b.PrependBytes(20) + if err != nil { + return err + } + + if m.MaximumResponseDelay < 0 { + return errors.New("maximum response delay must not be negative") + } + dms := m.MaximumResponseDelay / time.Millisecond + if dms > math.MaxUint16 { + return fmt.Errorf("maximum response delay %dms is more than the allowed 65535ms", dms) + } + binary.BigEndian.PutUint16(buf[0:2], uint16(dms)) + + copy(buf[2:4], []byte{0x0, 0x0}) + + ma16 := m.MulticastAddress.To16() + if ma16 == nil { + return fmt.Errorf("invalid multicast address '%s'", m.MulticastAddress) + } + copy(buf[4:20], ma16) + + return nil +} + +// Sums this layer up nicely formatted +func (m *MLDv1Message) String() string { + return fmt.Sprintf( + "Maximum Response Delay: %dms, Multicast Address: %s", + m.MaximumResponseDelay/time.Millisecond, + m.MulticastAddress) +} + +// MLDv1MulticastListenerQueryMessage are sent by the router to determine +// whether there are multicast listeners on the link. +// https://tools.ietf.org/html/rfc2710 Page 5 +type MLDv1MulticastListenerQueryMessage struct { + MLDv1Message +} + +// DecodeFromBytes decodes the given bytes into this layer. +func (m *MLDv1MulticastListenerQueryMessage) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + err := m.MLDv1Message.DecodeFromBytes(data, df) + if err != nil { + return err + } + + if len(data) > 20 { + m.Payload = data[20:] + } + + return nil +} + +// LayerType returns LayerTypeMLDv1MulticastListenerQuery. +func (*MLDv1MulticastListenerQueryMessage) LayerType() gopacket.LayerType { + return LayerTypeMLDv1MulticastListenerQuery +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (*MLDv1MulticastListenerQueryMessage) CanDecode() gopacket.LayerClass { + return LayerTypeMLDv1MulticastListenerQuery +} + +// IsGeneralQuery is true when this is a general query. +// In a Query message, the Multicast Address field is set to zero when +// sending a General Query. +// https://tools.ietf.org/html/rfc2710#section-3.6 +func (m *MLDv1MulticastListenerQueryMessage) IsGeneralQuery() bool { + return net.IPv6zero.Equal(m.MulticastAddress) +} + +// IsSpecificQuery is true when this is not a general query. +// In a Query message, the Multicast Address field is set to a specific +// IPv6 multicast address when sending a Multicast-Address-Specific Query. 
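For reference, the MLDv1 body that DecodeFromBytes and SerializeTo above walk through is a fixed 20 bytes: a 2-byte Maximum Response Delay in milliseconds, 2 reserved bytes, and a 16-byte multicast address, where an all-zero address marks a General Query. A stdlib-only sketch of that layout follows; the values are arbitrary and the snippet is an illustration, not part of the vendored code.

package main

import (
	"encoding/binary"
	"fmt"
	"net"
	"time"
)

func main() {
	// Hand-build the 20-byte MLDv1 body of a General Query:
	// bytes 0-1  Maximum Response Delay (ms, big endian)
	// bytes 2-3  reserved
	// bytes 4-19 multicast address (:: for a General Query)
	body := make([]byte, 20)
	binary.BigEndian.PutUint16(body[0:2], 10000) // 10s maximum response delay
	copy(body[4:20], net.IPv6zero)

	delay := time.Duration(binary.BigEndian.Uint16(body[0:2])) * time.Millisecond
	addr := net.IP(body[4:20])
	fmt.Printf("delay=%v generalQuery=%t\n", delay, net.IPv6zero.Equal(addr))
}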
+// https://tools.ietf.org/html/rfc2710#section-3.6 +func (m *MLDv1MulticastListenerQueryMessage) IsSpecificQuery() bool { + return !m.IsGeneralQuery() +} + +// MLDv1MulticastListenerReportMessage is sent by a client listening on +// a specific multicast address to indicate that it is (still) listening +// on the specific multicast address. +// https://tools.ietf.org/html/rfc2710 Page 6 +type MLDv1MulticastListenerReportMessage struct { + MLDv1Message +} + +// LayerType returns LayerTypeMLDv1MulticastListenerReport. +func (*MLDv1MulticastListenerReportMessage) LayerType() gopacket.LayerType { + return LayerTypeMLDv1MulticastListenerReport +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (*MLDv1MulticastListenerReportMessage) CanDecode() gopacket.LayerClass { + return LayerTypeMLDv1MulticastListenerReport +} + +// MLDv1MulticastListenerDoneMessage should be sent by a client when it ceases +// to listen to a multicast address on an interface. +// https://tools.ietf.org/html/rfc2710 Page 7 +type MLDv1MulticastListenerDoneMessage struct { + MLDv1Message +} + +// LayerType returns LayerTypeMLDv1MulticastListenerDone. +func (*MLDv1MulticastListenerDoneMessage) LayerType() gopacket.LayerType { + return LayerTypeMLDv1MulticastListenerDone +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (*MLDv1MulticastListenerDoneMessage) CanDecode() gopacket.LayerClass { + return LayerTypeMLDv1MulticastListenerDone +} + +func decodeMLDv1MulticastListenerReport(data []byte, p gopacket.PacketBuilder) error { + m := &MLDv1MulticastListenerReportMessage{} + return decodingLayerDecoder(m, data, p) +} + +func decodeMLDv1MulticastListenerQuery(data []byte, p gopacket.PacketBuilder) error { + m := &MLDv1MulticastListenerQueryMessage{} + return decodingLayerDecoder(m, data, p) +} + +func decodeMLDv1MulticastListenerDone(data []byte, p gopacket.PacketBuilder) error { + m := &MLDv1MulticastListenerDoneMessage{} + return decodingLayerDecoder(m, data, p) +} diff --git a/vendor/github.com/google/gopacket/layers/mldv2.go b/vendor/github.com/google/gopacket/layers/mldv2.go new file mode 100644 index 0000000000..05100a52d1 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/mldv2.go @@ -0,0 +1,619 @@ +// Copyright 2018 GoPacket Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "errors" + "fmt" + "math" + "net" + "time" + + "github.com/google/gopacket" +) + +const ( + // S Flag bit is 1 + mldv2STrue uint8 = 0x8 + + // S Flag value mask + // mldv2STrue & mldv2SMask == mldv2STrue // true + // 0x1 & mldv2SMask == mldv2STrue // true + // 0x0 & mldv2SMask == mldv2STrue // false + mldv2SMask uint8 = 0x8 + + // QRV value mask + mldv2QRVMask uint8 = 0x7 +) + +// MLDv2MulticastListenerQueryMessage are sent by multicast routers to query the +// multicast listening state of neighboring interfaces. +// https://tools.ietf.org/html/rfc3810#section-5.1 +// +// Some information, like Maximum Response Code and Multicast Address are in the +// previous layer LayerTypeMLDv1MulticastListenerQuery +type MLDv2MulticastListenerQueryMessage struct { + BaseLayer + // 5.1.3. Maximum Response Delay COde + MaximumResponseCode uint16 + // 5.1.5. Multicast Address + // Zero in general query + // Specific IPv6 multicast address otherwise + MulticastAddress net.IP + // 5.1.7. 
S Flag (Suppress Router-Side Processing) + SuppressRoutersideProcessing bool + // 5.1.8. QRV (Querier's Robustness Variable) + QueriersRobustnessVariable uint8 + // 5.1.9. QQIC (Querier's Query Interval Code) + QueriersQueryIntervalCode uint8 + // 5.1.10. Number of Sources (N) + NumberOfSources uint16 + // 5.1.11 Source Address [i] + SourceAddresses []net.IP +} + +// DecodeFromBytes decodes the given bytes into this layer. +func (m *MLDv2MulticastListenerQueryMessage) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 24 { + df.SetTruncated() + return errors.New("ICMP layer less than 24 bytes for Multicast Listener Query Message V2") + } + + m.MaximumResponseCode = binary.BigEndian.Uint16(data[0:2]) + // ignore data[2:4] as per https://tools.ietf.org/html/rfc3810#section-5.1.4 + m.MulticastAddress = data[4:20] + m.SuppressRoutersideProcessing = (data[20] & mldv2SMask) == mldv2STrue + m.QueriersRobustnessVariable = data[20] & mldv2QRVMask + m.QueriersQueryIntervalCode = data[21] + + m.NumberOfSources = binary.BigEndian.Uint16(data[22:24]) + + var end int + for i := uint16(0); i < m.NumberOfSources; i++ { + begin := 24 + (int(i) * 16) + end = begin + 16 + + if end > len(data) { + df.SetTruncated() + return fmt.Errorf("ICMP layer less than %d bytes for Multicast Listener Query Message V2", end) + } + + m.SourceAddresses = append(m.SourceAddresses, data[begin:end]) + } + + return nil +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (*MLDv2MulticastListenerQueryMessage) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypeZero +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. 
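The fixed 24-byte MLDv2 query header decoded above is followed by N 16-byte source addresses; the S flag and QRV share byte 20 under the 0x8 and 0x7 masks, and the QQIC sits in byte 21. A stdlib-only sketch of that layout with one arbitrary source address, for illustration only and not part of the vendored code:

package main

import (
	"encoding/binary"
	"fmt"
	"net"
)

func main() {
	// Hand-build a 24+16 byte MLDv2 query body with one source address,
	// mirroring the offsets used by DecodeFromBytes above.
	body := make([]byte, 24+16)
	binary.BigEndian.PutUint16(body[0:2], 1000)        // Maximum Response Code
	copy(body[4:20], net.ParseIP("ff02::1:3"))         // multicast address
	body[20] = 0x8 | 0x2                               // S flag set, QRV = 2
	body[21] = 125                                     // QQIC (literal seconds, since < 128)
	binary.BigEndian.PutUint16(body[22:24], 1)         // one source address follows
	copy(body[24:40], net.ParseIP("fe80::1"))

	fmt.Println("S flag:", body[20]&0x8 != 0)
	fmt.Println("QRV:   ", body[20]&0x7)
	fmt.Println("source:", net.IP(body[24:40]))
}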
+func (m *MLDv2MulticastListenerQueryMessage) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + if err := m.serializeSourceAddressesTo(b, opts); err != nil { + return err + } + + buf, err := b.PrependBytes(24) + if err != nil { + return err + } + + binary.BigEndian.PutUint16(buf[0:2], m.MaximumResponseCode) + copy(buf[2:4], []byte{0x00, 0x00}) // set reserved bytes to zero + + ma16 := m.MulticastAddress.To16() + if ma16 == nil { + return fmt.Errorf("invalid MulticastAddress '%s'", m.MulticastAddress) + } + copy(buf[4:20], ma16) + + byte20 := m.QueriersRobustnessVariable & mldv2QRVMask + if m.SuppressRoutersideProcessing { + byte20 |= mldv2STrue + } else { + byte20 &= ^mldv2STrue // the complement of mldv2STrue + } + byte20 &= 0x0F // set reserved bits to zero + buf[20] = byte20 + + binary.BigEndian.PutUint16(buf[22:24], m.NumberOfSources) + buf[21] = m.QueriersQueryIntervalCode + + return nil +} + +// writes each source address to the buffer preserving the order +func (m *MLDv2MulticastListenerQueryMessage) serializeSourceAddressesTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + numberOfSourceAddresses := len(m.SourceAddresses) + if numberOfSourceAddresses > math.MaxUint16 { + return fmt.Errorf( + "there are more than %d source addresses, but 65535 is the maximum number of supported addresses", + numberOfSourceAddresses) + } + + if opts.FixLengths { + m.NumberOfSources = uint16(numberOfSourceAddresses) + } + + lastSAIdx := numberOfSourceAddresses - 1 + for k := range m.SourceAddresses { + i := lastSAIdx - k // reverse order + + buf, err := b.PrependBytes(16) + if err != nil { + return err + } + + sa16 := m.SourceAddresses[i].To16() + if sa16 == nil { + return fmt.Errorf("invalid source address [%d] '%s'", i, m.SourceAddresses[i]) + } + copy(buf[0:16], sa16) + } + + return nil +} + +// String sums this layer up nicely formatted +func (m *MLDv2MulticastListenerQueryMessage) String() string { + return fmt.Sprintf( + "Maximum Response Code: %#x (%dms), Multicast Address: %s, Suppress Routerside Processing: %t, QRV: %#x, QQIC: %#x (%ds), Number of Source Address: %d (actual: %d), Source Addresses: %s", + m.MaximumResponseCode, + m.MaximumResponseDelay(), + m.MulticastAddress, + m.SuppressRoutersideProcessing, + m.QueriersRobustnessVariable, + m.QueriersQueryIntervalCode, + m.QQI()/time.Second, + m.NumberOfSources, + len(m.SourceAddresses), + m.SourceAddresses) +} + +// LayerType returns LayerTypeMLDv2MulticastListenerQuery. +func (*MLDv2MulticastListenerQueryMessage) LayerType() gopacket.LayerType { + return LayerTypeMLDv2MulticastListenerQuery +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. 
+func (*MLDv2MulticastListenerQueryMessage) CanDecode() gopacket.LayerClass { + return LayerTypeMLDv2MulticastListenerQuery +} + +// QQI calculates the Querier's Query Interval based on the QQIC +// according to https://tools.ietf.org/html/rfc3810#section-5.1.9 +func (m *MLDv2MulticastListenerQueryMessage) QQI() time.Duration { + data := m.QueriersQueryIntervalCode + if data < 128 { + return time.Second * time.Duration(data) + } + + exp := uint16(data) & 0x70 >> 4 + mant := uint16(data) & 0x0F + return time.Second * time.Duration(mant|0x1000<<(exp+3)) +} + +// SetQQI calculates and updates the Querier's Query Interval Code (QQIC) +// according to https://tools.ietf.org/html/rfc3810#section-5.1.9 +func (m *MLDv2MulticastListenerQueryMessage) SetQQI(d time.Duration) error { + if d < 0 { + m.QueriersQueryIntervalCode = 0 + return errors.New("QQI duration is negative") + } + + if d == 0 { + m.QueriersQueryIntervalCode = 0 + return nil + } + + dms := d / time.Second + if dms < 128 { + m.QueriersQueryIntervalCode = uint8(dms) + } + + if dms > 31744 { // mant=0xF, exp=0x7 + m.QueriersQueryIntervalCode = 0xFF + return fmt.Errorf("QQI duration %ds is, maximum allowed is 31744s", dms) + } + + value := uint16(dms) // ok, because 31744 < math.MaxUint16 + exp := uint8(7) + for mask := uint16(0x4000); exp > 0; exp-- { + if mask&value != 0 { + break + } + + mask >>= 1 + } + + mant := uint8(0x000F & (value >> (exp + 3))) + sig := uint8(0x10) + m.QueriersQueryIntervalCode = sig | exp<<4 | mant + + return nil +} + +// MaximumResponseDelay returns the Maximum Response Delay based on the +// Maximum Response Code according to +// https://tools.ietf.org/html/rfc3810#section-5.1.3 +func (m *MLDv2MulticastListenerQueryMessage) MaximumResponseDelay() time.Duration { + if m.MaximumResponseCode < 0x8000 { + return time.Duration(m.MaximumResponseCode) + } + + exp := m.MaximumResponseCode & 0x7000 >> 12 + mant := m.MaximumResponseCode & 0x0FFF + + return time.Millisecond * time.Duration(mant|0x1000<<(exp+3)) +} + +// SetMLDv2MaximumResponseDelay updates the Maximum Response Code according to +// https://tools.ietf.org/html/rfc3810#section-5.1.3 +func (m *MLDv2MulticastListenerQueryMessage) SetMLDv2MaximumResponseDelay(d time.Duration) error { + if d == 0 { + m.MaximumResponseCode = 0 + return nil + } + + if d < 0 { + return errors.New("maximum response delay must not be negative") + } + + dms := d / time.Millisecond + + if dms < 32768 { + m.MaximumResponseCode = uint16(dms) + } + + if dms > 4193280 { // mant=0xFFF, exp=0x7 + return fmt.Errorf("maximum response delay %dms is bigger the than maximum of 4193280ms", dms) + } + + value := uint32(dms) // ok, because 4193280 < math.MaxUint32 + exp := uint8(7) + for mask := uint32(0x40000000); exp > 0; exp-- { + if mask&value != 0 { + break + } + + mask >>= 1 + } + + mant := uint16(0x00000FFF & (value >> (exp + 3))) + sig := uint16(0x1000) + m.MaximumResponseCode = sig | uint16(exp)<<12 | mant + return nil +} + +// MLDv2MulticastListenerReportMessage is sent by an IP node to report the +// current multicast listening state, or changes therein. +// https://tools.ietf.org/html/rfc3810#section-5.2 +type MLDv2MulticastListenerReportMessage struct { + BaseLayer + // 5.2.3. Nr of Mcast Address Records + NumberOfMulticastAddressRecords uint16 + // 5.2.4. Multicast Address Record [i] + MulticastAddressRecords []MLDv2MulticastAddressRecord +} + +// DecodeFromBytes decodes the given bytes into this layer. 
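QQI and MaximumResponseDelay above both unpack an exponent/mantissa encoding: small codes are taken literally, larger codes carry a 3-bit exponent and a mantissa. A stdlib-only sketch of the Maximum Response Code interpretation as described in RFC 3810 section 5.1.3; this is an illustration of the encoding, not the vendored implementation.

package main

import "fmt"

// maxRespDelayMs interprets an MLDv2 Maximum Response Code per RFC 3810
// section 5.1.3: values below 32768 are milliseconds directly; larger values
// pack a 3-bit exponent and a 12-bit mantissa.
func maxRespDelayMs(code uint16) uint32 {
	if code < 0x8000 {
		return uint32(code)
	}
	exp := uint32(code>>12) & 0x7
	mant := uint32(code) & 0x0FFF
	return (mant | 0x1000) << (exp + 3)
}

func main() {
	fmt.Println(maxRespDelayMs(1000))   // 1000 ms, linear range
	fmt.Println(maxRespDelayMs(0x8000)) // 32768 ms, smallest exponential value
}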
+func (m *MLDv2MulticastListenerReportMessage) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 4 { + df.SetTruncated() + return errors.New("ICMP layer less than 4 bytes for Multicast Listener Report Message V2") + } + + // ignore data[0:2] as per RFC + // https://tools.ietf.org/html/rfc3810#section-5.2.1 + m.NumberOfMulticastAddressRecords = binary.BigEndian.Uint16(data[2:4]) + + begin := 4 + for i := uint16(0); i < m.NumberOfMulticastAddressRecords; i++ { + mar := MLDv2MulticastAddressRecord{} + read, err := mar.decode(data[begin:], df) + if err != nil { + return err + } + + m.MulticastAddressRecords = append(m.MulticastAddressRecords, mar) + + begin += read + } + + return nil +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (m *MLDv2MulticastListenerReportMessage) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + lastItemIdx := len(m.MulticastAddressRecords) - 1 + for k := range m.MulticastAddressRecords { + i := lastItemIdx - k // reverse order + + err := m.MulticastAddressRecords[i].serializeTo(b, opts) + if err != nil { + return err + } + } + + if opts.FixLengths { + numberOfMAR := len(m.MulticastAddressRecords) + if numberOfMAR > math.MaxUint16 { + return fmt.Errorf( + "%d multicast address records added, but the maximum is 65535", + numberOfMAR) + } + + m.NumberOfMulticastAddressRecords = uint16(numberOfMAR) + } + + buf, err := b.PrependBytes(4) + if err != nil { + return err + } + + copy(buf[0:2], []byte{0x0, 0x0}) + binary.BigEndian.PutUint16(buf[2:4], m.NumberOfMulticastAddressRecords) + return nil +} + +// Sums this layer up nicely formatted +func (m *MLDv2MulticastListenerReportMessage) String() string { + return fmt.Sprintf( + "Number of Mcast Addr Records: %d (actual %d), Multicast Address Records: %+v", + m.NumberOfMulticastAddressRecords, + len(m.MulticastAddressRecords), + m.MulticastAddressRecords) +} + +// LayerType returns LayerTypeMLDv2MulticastListenerQuery. +func (*MLDv2MulticastListenerReportMessage) LayerType() gopacket.LayerType { + return LayerTypeMLDv2MulticastListenerReport +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (*MLDv2MulticastListenerReportMessage) CanDecode() gopacket.LayerClass { + return LayerTypeMLDv2MulticastListenerReport +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (*MLDv2MulticastListenerReportMessage) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypePayload +} + +// MLDv2MulticastAddressRecordType holds the type of a +// Multicast Address Record, according to +// https://tools.ietf.org/html/rfc3810#section-5.2.5 and +// https://tools.ietf.org/html/rfc3810#section-5.2.12 +type MLDv2MulticastAddressRecordType uint8 + +const ( + // MLDv2MulticastAddressRecordTypeModeIsIncluded stands for + // MODE_IS_INCLUDE - indicates that the interface has a filter + // mode of INCLUDE for the specified multicast address. + MLDv2MulticastAddressRecordTypeModeIsIncluded MLDv2MulticastAddressRecordType = 1 + // MLDv2MulticastAddressRecordTypeModeIsExcluded stands for + // MODE_IS_EXCLUDE - indicates that the interface has a filter + // mode of EXCLUDE for the specified multicast address. 
+ MLDv2MulticastAddressRecordTypeModeIsExcluded MLDv2MulticastAddressRecordType = 2 + // MLDv2MulticastAddressRecordTypeChangeToIncludeMode stands for + // CHANGE_TO_INCLUDE_MODE - indicates that the interface has + // changed to INCLUDE filter mode for the specified multicast + // address. + MLDv2MulticastAddressRecordTypeChangeToIncludeMode MLDv2MulticastAddressRecordType = 3 + // MLDv2MulticastAddressRecordTypeChangeToExcludeMode stands for + // CHANGE_TO_EXCLUDE_MODE - indicates that the interface has + // changed to EXCLUDE filter mode for the specified multicast + // address + MLDv2MulticastAddressRecordTypeChangeToExcludeMode MLDv2MulticastAddressRecordType = 4 + // MLDv2MulticastAddressRecordTypeAllowNewSources stands for + // ALLOW_NEW_SOURCES - indicates that the Source Address [i] + // fields in this Multicast Address Record contain a list of + // the additional sources that the node wishes to listen to, + // for packets sent to the specified multicast address. + MLDv2MulticastAddressRecordTypeAllowNewSources MLDv2MulticastAddressRecordType = 5 + // MLDv2MulticastAddressRecordTypeBlockOldSources stands for + // BLOCK_OLD_SOURCES - indicates that the Source Address [i] + // fields in this Multicast Address Record contain a list of + // the sources that the node no longer wishes to listen to, + // for packets sent to the specified multicast address. + MLDv2MulticastAddressRecordTypeBlockOldSources MLDv2MulticastAddressRecordType = 6 +) + +// Human readable record types +// Naming follows https://tools.ietf.org/html/rfc3810#section-5.2.12 +func (m MLDv2MulticastAddressRecordType) String() string { + switch m { + case MLDv2MulticastAddressRecordTypeModeIsIncluded: + return "MODE_IS_INCLUDE" + case MLDv2MulticastAddressRecordTypeModeIsExcluded: + return "MODE_IS_EXCLUDE" + case MLDv2MulticastAddressRecordTypeChangeToIncludeMode: + return "CHANGE_TO_INCLUDE_MODE" + case MLDv2MulticastAddressRecordTypeChangeToExcludeMode: + return "CHANGE_TO_EXCLUDE_MODE" + case MLDv2MulticastAddressRecordTypeAllowNewSources: + return "ALLOW_NEW_SOURCES" + case MLDv2MulticastAddressRecordTypeBlockOldSources: + return "BLOCK_OLD_SOURCES" + default: + return fmt.Sprintf("UNKNOWN(%d)", m) + } +} + +// MLDv2MulticastAddressRecord contains information on the sender listening to a +// single multicast address on the interface the report is sent. +// https://tools.ietf.org/html/rfc3810#section-5.2.4 +type MLDv2MulticastAddressRecord struct { + // 5.2.5. Record Type + RecordType MLDv2MulticastAddressRecordType + // 5.2.6. Auxiliary Data Length (number of 32-bit words) + AuxDataLen uint8 + // 5.2.7. Number Of Sources (N) + N uint16 + // 5.2.8. 
Multicast Address + MulticastAddress net.IP + // 5.2.9 Source Address [i] + SourceAddresses []net.IP + // 5.2.10 Auxiliary Data + AuxiliaryData []byte +} + +// decodes a multicast address record from bytes +func (m *MLDv2MulticastAddressRecord) decode(data []byte, df gopacket.DecodeFeedback) (int, error) { + if len(data) < 20 { + df.SetTruncated() + return 0, errors.New( + "Multicast Listener Report Message V2 layer less than 4 bytes for Multicast Address Record") + } + + m.RecordType = MLDv2MulticastAddressRecordType(data[0]) + m.AuxDataLen = data[1] + m.N = binary.BigEndian.Uint16(data[2:4]) + m.MulticastAddress = data[4:20] + + for i := uint16(0); i < m.N; i++ { + begin := 20 + (int(i) * 16) + end := begin + 16 + + if len(data) < end { + df.SetTruncated() + return begin, fmt.Errorf( + "Multicast Listener Report Message V2 layer less than %d bytes for Multicast Address Record", end) + } + + m.SourceAddresses = append(m.SourceAddresses, data[begin:end]) + } + + expectedLengthWithouAuxData := 20 + (int(m.N) * 16) + expectedTotalLength := (int(m.AuxDataLen) * 4) + expectedLengthWithouAuxData // *4 because AuxDataLen are 32bit words + if len(data) < expectedTotalLength { + return expectedLengthWithouAuxData, fmt.Errorf( + "Multicast Listener Report Message V2 layer less than %d bytes for Multicast Address Record", + expectedLengthWithouAuxData) + } + + m.AuxiliaryData = data[expectedLengthWithouAuxData:expectedTotalLength] + + return expectedTotalLength, nil +} + +// String sums this layer up nicely formatted +func (m *MLDv2MulticastAddressRecord) String() string { + return fmt.Sprintf( + "RecordType: %d (%s), AuxDataLen: %d [32-bit words], N: %d, Multicast Address: %s, SourceAddresses: %s, Auxiliary Data: %#x", + m.RecordType, + m.RecordType.String(), + m.AuxDataLen, + m.N, + m.MulticastAddress.To16(), + m.SourceAddresses, + m.AuxiliaryData) +} + +// serializes a multicast address record +func (m *MLDv2MulticastAddressRecord) serializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + if err := m.serializeAuxiliaryDataTo(b, opts); err != nil { + return err + } + + if err := m.serializeSourceAddressesTo(b, opts); err != nil { + return err + } + + buf, err := b.PrependBytes(20) + if err != nil { + return err + } + + buf[0] = uint8(m.RecordType) + buf[1] = m.AuxDataLen + binary.BigEndian.PutUint16(buf[2:4], m.N) + + ma16 := m.MulticastAddress.To16() + if ma16 == nil { + return fmt.Errorf("invalid multicast address '%s'", m.MulticastAddress) + } + copy(buf[4:20], ma16) + + return nil +} + +// serializes the auxiliary data of a multicast address record +func (m *MLDv2MulticastAddressRecord) serializeAuxiliaryDataTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + if remainder := len(m.AuxiliaryData) % 4; remainder != 0 { + zeroWord := []byte{0x0, 0x0, 0x0, 0x0} + m.AuxiliaryData = append(m.AuxiliaryData, zeroWord[:remainder]...) 
+ } + + if opts.FixLengths { + auxDataLen := len(m.AuxiliaryData) / 4 + + if auxDataLen > math.MaxUint8 { + return fmt.Errorf("auxilary data is %d 32-bit words, but the maximum is 255 32-bit words", auxDataLen) + } + + m.AuxDataLen = uint8(auxDataLen) + } + + buf, err := b.PrependBytes(len(m.AuxiliaryData)) + if err != nil { + return err + } + + copy(buf, m.AuxiliaryData) + return nil +} + +// serializes the source addresses of a multicast address record preserving the order +func (m *MLDv2MulticastAddressRecord) serializeSourceAddressesTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + if opts.FixLengths { + numberOfSourceAddresses := len(m.SourceAddresses) + + if numberOfSourceAddresses > math.MaxUint16 { + return fmt.Errorf( + "%d source addresses added, but the maximum is 65535", + numberOfSourceAddresses) + } + + m.N = uint16(numberOfSourceAddresses) + } + + lastItemIdx := len(m.SourceAddresses) - 1 + for k := range m.SourceAddresses { + i := lastItemIdx - k // reverse order + + buf, err := b.PrependBytes(16) + if err != nil { + return err + } + + sa16 := m.SourceAddresses[i].To16() + if sa16 == nil { + return fmt.Errorf("invalid source address [%d] '%s'", i, m.SourceAddresses[i]) + } + copy(buf, sa16) + } + + return nil +} + +func decodeMLDv2MulticastListenerReport(data []byte, p gopacket.PacketBuilder) error { + m := &MLDv2MulticastListenerReportMessage{} + return decodingLayerDecoder(m, data, p) +} + +func decodeMLDv2MulticastListenerQuery(data []byte, p gopacket.PacketBuilder) error { + m := &MLDv2MulticastListenerQueryMessage{} + return decodingLayerDecoder(m, data, p) +} diff --git a/vendor/github.com/google/gopacket/layers/modbustcp.go b/vendor/github.com/google/gopacket/layers/modbustcp.go new file mode 100644 index 0000000000..bafbd7436c --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/modbustcp.go @@ -0,0 +1,150 @@ +// Copyright 2018, The GoPacket Authors, All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. +// +//****************************************************************************** + +package layers + +import ( + "encoding/binary" + "errors" + "github.com/google/gopacket" +) + +//****************************************************************************** +// +// ModbusTCP Decoding Layer +// ------------------------------------------ +// This file provides a GoPacket decoding layer for ModbusTCP. +// +//****************************************************************************** + +const mbapRecordSizeInBytes int = 7 +const modbusPDUMinimumRecordSizeInBytes int = 2 +const modbusPDUMaximumRecordSizeInBytes int = 253 + +// ModbusProtocol type +type ModbusProtocol uint16 + +// ModbusProtocol known values. +const ( + ModbusProtocolModbus ModbusProtocol = 0 +) + +func (mp ModbusProtocol) String() string { + switch mp { + default: + return "Unknown" + case ModbusProtocolModbus: + return "Modbus" + } +} + +//****************************************************************************** + +// ModbusTCP Type +// -------- +// Type ModbusTCP implements the DecodingLayer interface. Each ModbusTCP object +// represents in a structured form the MODBUS Application Protocol header (MBAP) record present as the TCP +// payload in an ModbusTCP TCP packet. +// +type ModbusTCP struct { + BaseLayer // Stores the packet bytes and payload (Modbus PDU) bytes . 
+ + TransactionIdentifier uint16 // Identification of a MODBUS Request/Response transaction + ProtocolIdentifier ModbusProtocol // It is used for intra-system multiplexing + Length uint16 // Number of following bytes (includes 1 byte for UnitIdentifier + Modbus data length + UnitIdentifier uint8 // Identification of a remote slave connected on a serial line or on other buses +} + +//****************************************************************************** + +// LayerType returns the layer type of the ModbusTCP object, which is LayerTypeModbusTCP. +func (d *ModbusTCP) LayerType() gopacket.LayerType { + return LayerTypeModbusTCP +} + +//****************************************************************************** + +// decodeModbusTCP analyses a byte slice and attempts to decode it as an ModbusTCP +// record of a TCP packet. +// +// If it succeeds, it loads p with information about the packet and returns nil. +// If it fails, it returns an error (non nil). +// +// This function is employed in layertypes.go to register the ModbusTCP layer. +func decodeModbusTCP(data []byte, p gopacket.PacketBuilder) error { + + // Attempt to decode the byte slice. + d := &ModbusTCP{} + err := d.DecodeFromBytes(data, p) + if err != nil { + return err + } + // If the decoding worked, add the layer to the packet and set it + // as the application layer too, if there isn't already one. + p.AddLayer(d) + p.SetApplicationLayer(d) + + return p.NextDecoder(d.NextLayerType()) + +} + +//****************************************************************************** + +// DecodeFromBytes analyses a byte slice and attempts to decode it as an ModbusTCP +// record of a TCP packet. +// +// Upon succeeds, it loads the ModbusTCP object with information about the packet +// and returns nil. +// Upon failure, it returns an error (non nil). +func (d *ModbusTCP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + + // If the data block is too short to be a MBAP record, then return an error. + if len(data) < mbapRecordSizeInBytes+modbusPDUMinimumRecordSizeInBytes { + df.SetTruncated() + return errors.New("ModbusTCP packet too short") + } + + if len(data) > mbapRecordSizeInBytes+modbusPDUMaximumRecordSizeInBytes { + df.SetTruncated() + return errors.New("ModbusTCP packet too long") + } + + // ModbusTCP type embeds type BaseLayer which contains two fields: + // Contents is supposed to contain the bytes of the data at this level (MPBA). + // Payload is supposed to contain the payload of this level (PDU). + d.BaseLayer = BaseLayer{Contents: data[:mbapRecordSizeInBytes], Payload: data[mbapRecordSizeInBytes:len(data)]} + + // Extract the fields from the block of bytes. + // The fields can just be copied in big endian order. + d.TransactionIdentifier = binary.BigEndian.Uint16(data[:2]) + d.ProtocolIdentifier = ModbusProtocol(binary.BigEndian.Uint16(data[2:4])) + d.Length = binary.BigEndian.Uint16(data[4:6]) + + // Length should have the size of the payload plus one byte (size of UnitIdentifier) + if d.Length != uint16(len(d.BaseLayer.Payload)+1) { + df.SetTruncated() + return errors.New("ModbusTCP packet with wrong field value (Length)") + } + d.UnitIdentifier = uint8(data[6]) + + return nil +} + +//****************************************************************************** + +// NextLayerType returns the layer type of the ModbusTCP payload, which is LayerTypePayload. 
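DecodeFromBytes above splits a frame into the 7-byte MBAP header (transaction identifier, protocol identifier, length, unit identifier) and the Modbus PDU, and requires the length field to equal the PDU size plus one byte for the unit identifier. A stdlib-only sketch of a minimal frame that satisfies that check; the field values are arbitrary and the snippet is not part of the vendored code.

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// A minimal Modbus/TCP frame: 7-byte MBAP header followed by a 5-byte PDU
	// (function code 0x03 "read holding registers", start address 0, quantity 1).
	pdu := []byte{0x03, 0x00, 0x00, 0x00, 0x01}
	frame := make([]byte, 7+len(pdu))
	binary.BigEndian.PutUint16(frame[0:2], 1)                  // transaction identifier
	binary.BigEndian.PutUint16(frame[2:4], 0)                  // protocol identifier (0 = Modbus)
	binary.BigEndian.PutUint16(frame[4:6], uint16(len(pdu)+1)) // length = unit identifier + PDU
	frame[6] = 0x11                                            // unit identifier
	copy(frame[7:], pdu)

	length := binary.BigEndian.Uint16(frame[4:6])
	fmt.Println("length field:", length, "== PDU+1:", int(length) == len(pdu)+1)
}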
+func (d *ModbusTCP) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypePayload +} + +//****************************************************************************** + +// Payload returns Modbus Protocol Data Unit (PDU) composed by Function Code and Data, it is carried within ModbusTCP packets +func (d *ModbusTCP) Payload() []byte { + return d.BaseLayer.Payload +} diff --git a/vendor/github.com/google/gopacket/layers/mpls.go b/vendor/github.com/google/gopacket/layers/mpls.go new file mode 100644 index 0000000000..83079a09b7 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/mpls.go @@ -0,0 +1,87 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "errors" + "github.com/google/gopacket" +) + +// MPLS is the MPLS packet header. +type MPLS struct { + BaseLayer + Label uint32 + TrafficClass uint8 + StackBottom bool + TTL uint8 +} + +// LayerType returns gopacket.LayerTypeMPLS. +func (m *MPLS) LayerType() gopacket.LayerType { return LayerTypeMPLS } + +// ProtocolGuessingDecoder attempts to guess the protocol of the bytes it's +// given, then decode the packet accordingly. Its algorithm for guessing is: +// If the packet starts with byte 0x45-0x4F: IPv4 +// If the packet starts with byte 0x60-0x6F: IPv6 +// Otherwise: Error +// See draft-hsmit-isis-aal5mux-00.txt for more detail on this approach. +type ProtocolGuessingDecoder struct{} + +func (ProtocolGuessingDecoder) Decode(data []byte, p gopacket.PacketBuilder) error { + switch data[0] { + // 0x40 | header_len, where header_len is at least 5. + case 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f: + return decodeIPv4(data, p) + // IPv6 can start with any byte whose first 4 bits are 0x6. + case 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f: + return decodeIPv6(data, p) + } + return errors.New("Unable to guess protocol of packet data") +} + +// MPLSPayloadDecoder is the decoder used to data encapsulated by each MPLS +// layer. MPLS contains no type information, so we have to explicitly decide +// which decoder to use. This is initially set to ProtocolGuessingDecoder, our +// simple attempt at guessing protocols based on the first few bytes of data +// available to us. However, if you know that in your environment MPLS always +// encapsulates a specific protocol, you may reset this. +var MPLSPayloadDecoder gopacket.Decoder = ProtocolGuessingDecoder{} + +func decodeMPLS(data []byte, p gopacket.PacketBuilder) error { + decoded := binary.BigEndian.Uint32(data[:4]) + mpls := &MPLS{ + Label: decoded >> 12, + TrafficClass: uint8(decoded>>9) & 0x7, + StackBottom: decoded&0x100 != 0, + TTL: uint8(decoded), + BaseLayer: BaseLayer{data[:4], data[4:]}, + } + p.AddLayer(mpls) + if mpls.StackBottom { + return p.NextDecoder(MPLSPayloadDecoder) + } + return p.NextDecoder(gopacket.DecodeFunc(decodeMPLS)) +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. 
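decodeMPLS and SerializeTo above pack the label stack entry into a single big-endian 32-bit word: a 20-bit label, a 3-bit traffic class, the bottom-of-stack bit, and an 8-bit TTL. A stdlib-only sketch of that bit layout with illustrative values, not part of the vendored code:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// One MPLS label stack entry is 32 bits:
	// label (20) | traffic class (3) | bottom-of-stack (1) | TTL (8).
	var hdr [4]byte
	encoded := uint32(100)<<12 | uint32(5)<<9 | 0x100 | uint32(64) // label 100, TC 5, S=1, TTL 64
	binary.BigEndian.PutUint32(hdr[:], encoded)

	decoded := binary.BigEndian.Uint32(hdr[:])
	fmt.Println("label:", decoded>>12)
	fmt.Println("tc:   ", uint8(decoded>>9)&0x7)
	fmt.Println("S:    ", decoded&0x100 != 0)
	fmt.Println("ttl:  ", uint8(decoded))
}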
+func (m *MPLS) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + bytes, err := b.PrependBytes(4) + if err != nil { + return err + } + encoded := m.Label << 12 + encoded |= uint32(m.TrafficClass) << 9 + encoded |= uint32(m.TTL) + if m.StackBottom { + encoded |= 0x100 + } + binary.BigEndian.PutUint32(bytes, encoded) + return nil +} diff --git a/vendor/github.com/google/gopacket/layers/ndp.go b/vendor/github.com/google/gopacket/layers/ndp.go new file mode 100644 index 0000000000..f7ca1b26b7 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/ndp.go @@ -0,0 +1,611 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +// Enum types courtesy of... +// http://anonsvn.wireshark.org/wireshark/trunk/epan/dissectors/packet-ndp.c + +package layers + +import ( + "fmt" + "github.com/google/gopacket" + "net" +) + +type NDPChassisType uint8 + +// Nortel Chassis Types +const ( + NDPChassisother NDPChassisType = 1 + NDPChassis3000 NDPChassisType = 2 + NDPChassis3030 NDPChassisType = 3 + NDPChassis2310 NDPChassisType = 4 + NDPChassis2810 NDPChassisType = 5 + NDPChassis2912 NDPChassisType = 6 + NDPChassis2914 NDPChassisType = 7 + NDPChassis271x NDPChassisType = 8 + NDPChassis2813 NDPChassisType = 9 + NDPChassis2814 NDPChassisType = 10 + NDPChassis2915 NDPChassisType = 11 + NDPChassis5000 NDPChassisType = 12 + NDPChassis2813SA NDPChassisType = 13 + NDPChassis2814SA NDPChassisType = 14 + NDPChassis810M NDPChassisType = 15 + NDPChassisEthercell NDPChassisType = 16 + NDPChassis5005 NDPChassisType = 17 + NDPChassisAlcatelEWC NDPChassisType = 18 + NDPChassis2715SA NDPChassisType = 20 + NDPChassis2486 NDPChassisType = 21 + NDPChassis28000series NDPChassisType = 22 + NDPChassis23000series NDPChassisType = 23 + NDPChassis5DN00xseries NDPChassisType = 24 + NDPChassisBayStackEthernet NDPChassisType = 25 + NDPChassis23100series NDPChassisType = 26 + NDPChassis100BaseTHub NDPChassisType = 27 + NDPChassis3000FastEthernet NDPChassisType = 28 + NDPChassisOrionSwitch NDPChassisType = 29 + NDPChassisDDS NDPChassisType = 31 + NDPChassisCentillion6slot NDPChassisType = 32 + NDPChassisCentillion12slot NDPChassisType = 33 + NDPChassisCentillion1slot NDPChassisType = 34 + NDPChassisBayStack301 NDPChassisType = 35 + NDPChassisBayStackTokenRingHub NDPChassisType = 36 + NDPChassisFVCMultimediaSwitch NDPChassisType = 37 + NDPChassisSwitchNode NDPChassisType = 38 + NDPChassisBayStack302Switch NDPChassisType = 39 + NDPChassisBayStack350Switch NDPChassisType = 40 + NDPChassisBayStack150EthernetHub NDPChassisType = 41 + NDPChassisCentillion50NSwitch NDPChassisType = 42 + NDPChassisCentillion50TSwitch NDPChassisType = 43 + NDPChassisBayStack303304Switches NDPChassisType = 44 + NDPChassisBayStack200EthernetHub NDPChassisType = 45 + NDPChassisBayStack25010100EthernetHub NDPChassisType = 46 + NDPChassisBayStack450101001000Switches NDPChassisType = 48 + NDPChassisBayStack41010100Switches NDPChassisType = 49 + NDPChassisPassport1200L3Switch NDPChassisType = 50 + NDPChassisPassport1250L3Switch NDPChassisType = 51 + NDPChassisPassport1100L3Switch NDPChassisType = 52 + NDPChassisPassport1150L3Switch NDPChassisType = 53 + NDPChassisPassport1050L3Switch NDPChassisType = 54 + NDPChassisPassport1051L3Switch NDPChassisType = 55 + NDPChassisPassport8610L3Switch NDPChassisType = 56 + NDPChassisPassport8606L3Switch NDPChassisType = 57 + NDPChassisPassport8010 
NDPChassisType = 58 + NDPChassisPassport8006 NDPChassisType = 59 + NDPChassisBayStack670wirelessaccesspoint NDPChassisType = 60 + NDPChassisPassport740 NDPChassisType = 61 + NDPChassisPassport750 NDPChassisType = 62 + NDPChassisPassport790 NDPChassisType = 63 + NDPChassisBusinessPolicySwitch200010100Switches NDPChassisType = 64 + NDPChassisPassport8110L2Switch NDPChassisType = 65 + NDPChassisPassport8106L2Switch NDPChassisType = 66 + NDPChassisBayStack3580GigSwitch NDPChassisType = 67 + NDPChassisBayStack10PowerSupplyUnit NDPChassisType = 68 + NDPChassisBayStack42010100Switch NDPChassisType = 69 + NDPChassisOPTeraMetro1200EthernetServiceModule NDPChassisType = 70 + NDPChassisOPTera8010co NDPChassisType = 71 + NDPChassisOPTera8610coL3Switch NDPChassisType = 72 + NDPChassisOPTera8110coL2Switch NDPChassisType = 73 + NDPChassisOPTera8003 NDPChassisType = 74 + NDPChassisOPTera8603L3Switch NDPChassisType = 75 + NDPChassisOPTera8103L2Switch NDPChassisType = 76 + NDPChassisBayStack380101001000Switch NDPChassisType = 77 + NDPChassisEthernetSwitch47048T NDPChassisType = 78 + NDPChassisOPTeraMetro1450EthernetServiceModule NDPChassisType = 79 + NDPChassisOPTeraMetro1400EthernetServiceModule NDPChassisType = 80 + NDPChassisAlteonSwitchFamily NDPChassisType = 81 + NDPChassisEthernetSwitch46024TPWR NDPChassisType = 82 + NDPChassisOPTeraMetro8010OPML2Switch NDPChassisType = 83 + NDPChassisOPTeraMetro8010coOPML2Switch NDPChassisType = 84 + NDPChassisOPTeraMetro8006OPML2Switch NDPChassisType = 85 + NDPChassisOPTeraMetro8003OPML2Switch NDPChassisType = 86 + NDPChassisAlteon180e NDPChassisType = 87 + NDPChassisAlteonAD3 NDPChassisType = 88 + NDPChassisAlteon184 NDPChassisType = 89 + NDPChassisAlteonAD4 NDPChassisType = 90 + NDPChassisPassport1424L3Switch NDPChassisType = 91 + NDPChassisPassport1648L3Switch NDPChassisType = 92 + NDPChassisPassport1612L3Switch NDPChassisType = 93 + NDPChassisPassport1624L3Switch NDPChassisType = 94 + NDPChassisBayStack38024FFiber1000Switch NDPChassisType = 95 + NDPChassisEthernetRoutingSwitch551024T NDPChassisType = 96 + NDPChassisEthernetRoutingSwitch551048T NDPChassisType = 97 + NDPChassisEthernetSwitch47024T NDPChassisType = 98 + NDPChassisNortelNetworksWirelessLANAccessPoint2220 NDPChassisType = 99 + NDPChassisPassportRBS2402L3Switch NDPChassisType = 100 + NDPChassisAlteonApplicationSwitch2424 NDPChassisType = 101 + NDPChassisAlteonApplicationSwitch2224 NDPChassisType = 102 + NDPChassisAlteonApplicationSwitch2208 NDPChassisType = 103 + NDPChassisAlteonApplicationSwitch2216 NDPChassisType = 104 + NDPChassisAlteonApplicationSwitch3408 NDPChassisType = 105 + NDPChassisAlteonApplicationSwitch3416 NDPChassisType = 106 + NDPChassisNortelNetworksWirelessLANSecuritySwitch2250 NDPChassisType = 107 + NDPChassisEthernetSwitch42548T NDPChassisType = 108 + NDPChassisEthernetSwitch42524T NDPChassisType = 109 + NDPChassisNortelNetworksWirelessLANAccessPoint2221 NDPChassisType = 110 + NDPChassisNortelMetroEthernetServiceUnit24TSPFswitch NDPChassisType = 111 + NDPChassisNortelMetroEthernetServiceUnit24TLXDCswitch NDPChassisType = 112 + NDPChassisPassport830010slotchassis NDPChassisType = 113 + NDPChassisPassport83006slotchassis NDPChassisType = 114 + NDPChassisEthernetRoutingSwitch552024TPWR NDPChassisType = 115 + NDPChassisEthernetRoutingSwitch552048TPWR NDPChassisType = 116 + NDPChassisNortelNetworksVPNGateway3050 NDPChassisType = 117 + NDPChassisAlteonSSL31010100 NDPChassisType = 118 + NDPChassisAlteonSSL31010100Fiber NDPChassisType = 119 + NDPChassisAlteonSSL31010100FIPS NDPChassisType 
= 120 + NDPChassisAlteonSSL410101001000 NDPChassisType = 121 + NDPChassisAlteonSSL410101001000Fiber NDPChassisType = 122 + NDPChassisAlteonApplicationSwitch2424SSL NDPChassisType = 123 + NDPChassisEthernetSwitch32524T NDPChassisType = 124 + NDPChassisEthernetSwitch32524G NDPChassisType = 125 + NDPChassisNortelNetworksWirelessLANAccessPoint2225 NDPChassisType = 126 + NDPChassisNortelNetworksWirelessLANSecuritySwitch2270 NDPChassisType = 127 + NDPChassis24portEthernetSwitch47024TPWR NDPChassisType = 128 + NDPChassis48portEthernetSwitch47048TPWR NDPChassisType = 129 + NDPChassisEthernetRoutingSwitch553024TFD NDPChassisType = 130 + NDPChassisEthernetSwitch351024T NDPChassisType = 131 + NDPChassisNortelMetroEthernetServiceUnit12GACL3Switch NDPChassisType = 132 + NDPChassisNortelMetroEthernetServiceUnit12GDCL3Switch NDPChassisType = 133 + NDPChassisNortelSecureAccessSwitch NDPChassisType = 134 + NDPChassisNortelNetworksVPNGateway3070 NDPChassisType = 135 + NDPChassisOPTeraMetro3500 NDPChassisType = 136 + NDPChassisSMBBES101024T NDPChassisType = 137 + NDPChassisSMBBES101048T NDPChassisType = 138 + NDPChassisSMBBES102024TPWR NDPChassisType = 139 + NDPChassisSMBBES102048TPWR NDPChassisType = 140 + NDPChassisSMBBES201024T NDPChassisType = 141 + NDPChassisSMBBES201048T NDPChassisType = 142 + NDPChassisSMBBES202024TPWR NDPChassisType = 143 + NDPChassisSMBBES202048TPWR NDPChassisType = 144 + NDPChassisSMBBES11024T NDPChassisType = 145 + NDPChassisSMBBES11048T NDPChassisType = 146 + NDPChassisSMBBES12024TPWR NDPChassisType = 147 + NDPChassisSMBBES12048TPWR NDPChassisType = 148 + NDPChassisSMBBES21024T NDPChassisType = 149 + NDPChassisSMBBES21048T NDPChassisType = 150 + NDPChassisSMBBES22024TPWR NDPChassisType = 151 + NDPChassisSMBBES22048TPWR NDPChassisType = 152 + NDPChassisOME6500 NDPChassisType = 153 + NDPChassisEthernetRoutingSwitch4548GT NDPChassisType = 154 + NDPChassisEthernetRoutingSwitch4548GTPWR NDPChassisType = 155 + NDPChassisEthernetRoutingSwitch4550T NDPChassisType = 156 + NDPChassisEthernetRoutingSwitch4550TPWR NDPChassisType = 157 + NDPChassisEthernetRoutingSwitch4526FX NDPChassisType = 158 + NDPChassisEthernetRoutingSwitch250026T NDPChassisType = 159 + NDPChassisEthernetRoutingSwitch250026TPWR NDPChassisType = 160 + NDPChassisEthernetRoutingSwitch250050T NDPChassisType = 161 + NDPChassisEthernetRoutingSwitch250050TPWR NDPChassisType = 162 +) + +type NDPBackplaneType uint8 + +// Nortel Backplane Types +const ( + NDPBackplaneOther NDPBackplaneType = 1 + NDPBackplaneEthernet NDPBackplaneType = 2 + NDPBackplaneEthernetTokenring NDPBackplaneType = 3 + NDPBackplaneEthernetFDDI NDPBackplaneType = 4 + NDPBackplaneEthernetTokenringFDDI NDPBackplaneType = 5 + NDPBackplaneEthernetTokenringRedundantPower NDPBackplaneType = 6 + NDPBackplaneEthernetTokenringFDDIRedundantPower NDPBackplaneType = 7 + NDPBackplaneTokenRing NDPBackplaneType = 8 + NDPBackplaneEthernetTokenringFastEthernet NDPBackplaneType = 9 + NDPBackplaneEthernetFastEthernet NDPBackplaneType = 10 + NDPBackplaneEthernetTokenringFastEthernetRedundantPower NDPBackplaneType = 11 + NDPBackplaneEthernetFastEthernetGigabitEthernet NDPBackplaneType = 12 +) + +type NDPState uint8 + +// Device State +const ( + NDPStateTopology NDPState = 1 + NDPStateHeartbeat NDPState = 2 + NDPStateNew NDPState = 3 +) + +// NortelDiscovery is a packet layer containing the Nortel Discovery Protocol. 
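decodeNortelDiscovery below reads a fixed 11-byte payload: a 4-byte IP address, a 3-byte segment identifier, then one byte each for chassis type, backplane type, state, and number of links. A stdlib-only sketch of that layout with arbitrary values, not part of the vendored code:

package main

import "fmt"

func main() {
	// 11-byte NDP payload: IP (4) | segment (3) | chassis | backplane | state | numlinks
	payload := []byte{192, 0, 2, 10, 0x00, 0x01, 0x02, 12, 2, 1, 3}
	fmt.Printf("ip=%d.%d.%d.%d segment=%x chassis=%d backplane=%d state=%d links=%d\n",
		payload[0], payload[1], payload[2], payload[3],
		payload[4:7], payload[7], payload[8], payload[9], payload[10])
}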
+type NortelDiscovery struct { + BaseLayer + IPAddress net.IP + SegmentID []byte + Chassis NDPChassisType + Backplane NDPBackplaneType + State NDPState + NumLinks uint8 +} + +// LayerType returns gopacket.LayerTypeNortelDiscovery. +func (c *NortelDiscovery) LayerType() gopacket.LayerType { + return LayerTypeNortelDiscovery +} + +func decodeNortelDiscovery(data []byte, p gopacket.PacketBuilder) error { + c := &NortelDiscovery{} + if len(data) < 11 { + return fmt.Errorf("Invalid NortelDiscovery packet length %d", len(data)) + } + c.IPAddress = data[0:4] + c.SegmentID = data[4:7] + c.Chassis = NDPChassisType(data[7]) + c.Backplane = NDPBackplaneType(data[8]) + c.State = NDPState(data[9]) + c.NumLinks = uint8(data[10]) + p.AddLayer(c) + return nil +} + +func (t NDPChassisType) String() (s string) { + switch t { + case NDPChassisother: + s = "other" + case NDPChassis3000: + s = "3000" + case NDPChassis3030: + s = "3030" + case NDPChassis2310: + s = "2310" + case NDPChassis2810: + s = "2810" + case NDPChassis2912: + s = "2912" + case NDPChassis2914: + s = "2914" + case NDPChassis271x: + s = "271x" + case NDPChassis2813: + s = "2813" + case NDPChassis2814: + s = "2814" + case NDPChassis2915: + s = "2915" + case NDPChassis5000: + s = "5000" + case NDPChassis2813SA: + s = "2813SA" + case NDPChassis2814SA: + s = "2814SA" + case NDPChassis810M: + s = "810M" + case NDPChassisEthercell: + s = "Ethercell" + case NDPChassis5005: + s = "5005" + case NDPChassisAlcatelEWC: + s = "Alcatel Ethernet workgroup conc." + case NDPChassis2715SA: + s = "2715SA" + case NDPChassis2486: + s = "2486" + case NDPChassis28000series: + s = "28000 series" + case NDPChassis23000series: + s = "23000 series" + case NDPChassis5DN00xseries: + s = "5DN00x series" + case NDPChassisBayStackEthernet: + s = "BayStack Ethernet" + case NDPChassis23100series: + s = "23100 series" + case NDPChassis100BaseTHub: + s = "100Base-T Hub" + case NDPChassis3000FastEthernet: + s = "3000 Fast Ethernet" + case NDPChassisOrionSwitch: + s = "Orion switch" + case NDPChassisDDS: + s = "DDS" + case NDPChassisCentillion6slot: + s = "Centillion (6 slot)" + case NDPChassisCentillion12slot: + s = "Centillion (12 slot)" + case NDPChassisCentillion1slot: + s = "Centillion (1 slot)" + case NDPChassisBayStack301: + s = "BayStack 301" + case NDPChassisBayStackTokenRingHub: + s = "BayStack TokenRing Hub" + case NDPChassisFVCMultimediaSwitch: + s = "FVC Multimedia Switch" + case NDPChassisSwitchNode: + s = "Switch Node" + case NDPChassisBayStack302Switch: + s = "BayStack 302 Switch" + case NDPChassisBayStack350Switch: + s = "BayStack 350 Switch" + case NDPChassisBayStack150EthernetHub: + s = "BayStack 150 Ethernet Hub" + case NDPChassisCentillion50NSwitch: + s = "Centillion 50N switch" + case NDPChassisCentillion50TSwitch: + s = "Centillion 50T switch" + case NDPChassisBayStack303304Switches: + s = "BayStack 303 and 304 Switches" + case NDPChassisBayStack200EthernetHub: + s = "BayStack 200 Ethernet Hub" + case NDPChassisBayStack25010100EthernetHub: + s = "BayStack 250 10/100 Ethernet Hub" + case NDPChassisBayStack450101001000Switches: + s = "BayStack 450 10/100/1000 Switches" + case NDPChassisBayStack41010100Switches: + s = "BayStack 410 10/100 Switches" + case NDPChassisPassport1200L3Switch: + s = "Passport 1200 L3 Switch" + case NDPChassisPassport1250L3Switch: + s = "Passport 1250 L3 Switch" + case NDPChassisPassport1100L3Switch: + s = "Passport 1100 L3 Switch" + case NDPChassisPassport1150L3Switch: + s = "Passport 1150 L3 Switch" + case 
NDPChassisPassport1050L3Switch: + s = "Passport 1050 L3 Switch" + case NDPChassisPassport1051L3Switch: + s = "Passport 1051 L3 Switch" + case NDPChassisPassport8610L3Switch: + s = "Passport 8610 L3 Switch" + case NDPChassisPassport8606L3Switch: + s = "Passport 8606 L3 Switch" + case NDPChassisPassport8010: + s = "Passport 8010" + case NDPChassisPassport8006: + s = "Passport 8006" + case NDPChassisBayStack670wirelessaccesspoint: + s = "BayStack 670 wireless access point" + case NDPChassisPassport740: + s = "Passport 740" + case NDPChassisPassport750: + s = "Passport 750" + case NDPChassisPassport790: + s = "Passport 790" + case NDPChassisBusinessPolicySwitch200010100Switches: + s = "Business Policy Switch 2000 10/100 Switches" + case NDPChassisPassport8110L2Switch: + s = "Passport 8110 L2 Switch" + case NDPChassisPassport8106L2Switch: + s = "Passport 8106 L2 Switch" + case NDPChassisBayStack3580GigSwitch: + s = "BayStack 3580 Gig Switch" + case NDPChassisBayStack10PowerSupplyUnit: + s = "BayStack 10 Power Supply Unit" + case NDPChassisBayStack42010100Switch: + s = "BayStack 420 10/100 Switch" + case NDPChassisOPTeraMetro1200EthernetServiceModule: + s = "OPTera Metro 1200 Ethernet Service Module" + case NDPChassisOPTera8010co: + s = "OPTera 8010co" + case NDPChassisOPTera8610coL3Switch: + s = "OPTera 8610co L3 switch" + case NDPChassisOPTera8110coL2Switch: + s = "OPTera 8110co L2 switch" + case NDPChassisOPTera8003: + s = "OPTera 8003" + case NDPChassisOPTera8603L3Switch: + s = "OPTera 8603 L3 switch" + case NDPChassisOPTera8103L2Switch: + s = "OPTera 8103 L2 switch" + case NDPChassisBayStack380101001000Switch: + s = "BayStack 380 10/100/1000 Switch" + case NDPChassisEthernetSwitch47048T: + s = "Ethernet Switch 470-48T" + case NDPChassisOPTeraMetro1450EthernetServiceModule: + s = "OPTera Metro 1450 Ethernet Service Module" + case NDPChassisOPTeraMetro1400EthernetServiceModule: + s = "OPTera Metro 1400 Ethernet Service Module" + case NDPChassisAlteonSwitchFamily: + s = "Alteon Switch Family" + case NDPChassisEthernetSwitch46024TPWR: + s = "Ethernet Switch 460-24T-PWR" + case NDPChassisOPTeraMetro8010OPML2Switch: + s = "OPTera Metro 8010 OPM L2 Switch" + case NDPChassisOPTeraMetro8010coOPML2Switch: + s = "OPTera Metro 8010co OPM L2 Switch" + case NDPChassisOPTeraMetro8006OPML2Switch: + s = "OPTera Metro 8006 OPM L2 Switch" + case NDPChassisOPTeraMetro8003OPML2Switch: + s = "OPTera Metro 8003 OPM L2 Switch" + case NDPChassisAlteon180e: + s = "Alteon 180e" + case NDPChassisAlteonAD3: + s = "Alteon AD3" + case NDPChassisAlteon184: + s = "Alteon 184" + case NDPChassisAlteonAD4: + s = "Alteon AD4" + case NDPChassisPassport1424L3Switch: + s = "Passport 1424 L3 switch" + case NDPChassisPassport1648L3Switch: + s = "Passport 1648 L3 switch" + case NDPChassisPassport1612L3Switch: + s = "Passport 1612 L3 switch" + case NDPChassisPassport1624L3Switch: + s = "Passport 1624 L3 switch" + case NDPChassisBayStack38024FFiber1000Switch: + s = "BayStack 380-24F Fiber 1000 Switch" + case NDPChassisEthernetRoutingSwitch551024T: + s = "Ethernet Routing Switch 5510-24T" + case NDPChassisEthernetRoutingSwitch551048T: + s = "Ethernet Routing Switch 5510-48T" + case NDPChassisEthernetSwitch47024T: + s = "Ethernet Switch 470-24T" + case NDPChassisNortelNetworksWirelessLANAccessPoint2220: + s = "Nortel Networks Wireless LAN Access Point 2220" + case NDPChassisPassportRBS2402L3Switch: + s = "Passport RBS 2402 L3 switch" + case NDPChassisAlteonApplicationSwitch2424: + s = "Alteon Application Switch 2424" + case 
NDPChassisAlteonApplicationSwitch2224: + s = "Alteon Application Switch 2224" + case NDPChassisAlteonApplicationSwitch2208: + s = "Alteon Application Switch 2208" + case NDPChassisAlteonApplicationSwitch2216: + s = "Alteon Application Switch 2216" + case NDPChassisAlteonApplicationSwitch3408: + s = "Alteon Application Switch 3408" + case NDPChassisAlteonApplicationSwitch3416: + s = "Alteon Application Switch 3416" + case NDPChassisNortelNetworksWirelessLANSecuritySwitch2250: + s = "Nortel Networks Wireless LAN SecuritySwitch 2250" + case NDPChassisEthernetSwitch42548T: + s = "Ethernet Switch 425-48T" + case NDPChassisEthernetSwitch42524T: + s = "Ethernet Switch 425-24T" + case NDPChassisNortelNetworksWirelessLANAccessPoint2221: + s = "Nortel Networks Wireless LAN Access Point 2221" + case NDPChassisNortelMetroEthernetServiceUnit24TSPFswitch: + s = "Nortel Metro Ethernet Service Unit 24-T SPF switch" + case NDPChassisNortelMetroEthernetServiceUnit24TLXDCswitch: + s = " Nortel Metro Ethernet Service Unit 24-T LX DC switch" + case NDPChassisPassport830010slotchassis: + s = "Passport 8300 10-slot chassis" + case NDPChassisPassport83006slotchassis: + s = "Passport 8300 6-slot chassis" + case NDPChassisEthernetRoutingSwitch552024TPWR: + s = "Ethernet Routing Switch 5520-24T-PWR" + case NDPChassisEthernetRoutingSwitch552048TPWR: + s = "Ethernet Routing Switch 5520-48T-PWR" + case NDPChassisNortelNetworksVPNGateway3050: + s = "Nortel Networks VPN Gateway 3050" + case NDPChassisAlteonSSL31010100: + s = "Alteon SSL 310 10/100" + case NDPChassisAlteonSSL31010100Fiber: + s = "Alteon SSL 310 10/100 Fiber" + case NDPChassisAlteonSSL31010100FIPS: + s = "Alteon SSL 310 10/100 FIPS" + case NDPChassisAlteonSSL410101001000: + s = "Alteon SSL 410 10/100/1000" + case NDPChassisAlteonSSL410101001000Fiber: + s = "Alteon SSL 410 10/100/1000 Fiber" + case NDPChassisAlteonApplicationSwitch2424SSL: + s = "Alteon Application Switch 2424-SSL" + case NDPChassisEthernetSwitch32524T: + s = "Ethernet Switch 325-24T" + case NDPChassisEthernetSwitch32524G: + s = "Ethernet Switch 325-24G" + case NDPChassisNortelNetworksWirelessLANAccessPoint2225: + s = "Nortel Networks Wireless LAN Access Point 2225" + case NDPChassisNortelNetworksWirelessLANSecuritySwitch2270: + s = "Nortel Networks Wireless LAN SecuritySwitch 2270" + case NDPChassis24portEthernetSwitch47024TPWR: + s = "24-port Ethernet Switch 470-24T-PWR" + case NDPChassis48portEthernetSwitch47048TPWR: + s = "48-port Ethernet Switch 470-48T-PWR" + case NDPChassisEthernetRoutingSwitch553024TFD: + s = "Ethernet Routing Switch 5530-24TFD" + case NDPChassisEthernetSwitch351024T: + s = "Ethernet Switch 3510-24T" + case NDPChassisNortelMetroEthernetServiceUnit12GACL3Switch: + s = "Nortel Metro Ethernet Service Unit 12G AC L3 switch" + case NDPChassisNortelMetroEthernetServiceUnit12GDCL3Switch: + s = "Nortel Metro Ethernet Service Unit 12G DC L3 switch" + case NDPChassisNortelSecureAccessSwitch: + s = "Nortel Secure Access Switch" + case NDPChassisNortelNetworksVPNGateway3070: + s = "Nortel Networks VPN Gateway 3070" + case NDPChassisOPTeraMetro3500: + s = "OPTera Metro 3500" + case NDPChassisSMBBES101024T: + s = "SMB BES 1010 24T" + case NDPChassisSMBBES101048T: + s = "SMB BES 1010 48T" + case NDPChassisSMBBES102024TPWR: + s = "SMB BES 1020 24T PWR" + case NDPChassisSMBBES102048TPWR: + s = "SMB BES 1020 48T PWR" + case NDPChassisSMBBES201024T: + s = "SMB BES 2010 24T" + case NDPChassisSMBBES201048T: + s = "SMB BES 2010 48T" + case NDPChassisSMBBES202024TPWR: + s = "SMB BES 2020 
24T PWR" + case NDPChassisSMBBES202048TPWR: + s = "SMB BES 2020 48T PWR" + case NDPChassisSMBBES11024T: + s = "SMB BES 110 24T" + case NDPChassisSMBBES11048T: + s = "SMB BES 110 48T" + case NDPChassisSMBBES12024TPWR: + s = "SMB BES 120 24T PWR" + case NDPChassisSMBBES12048TPWR: + s = "SMB BES 120 48T PWR" + case NDPChassisSMBBES21024T: + s = "SMB BES 210 24T" + case NDPChassisSMBBES21048T: + s = "SMB BES 210 48T" + case NDPChassisSMBBES22024TPWR: + s = "SMB BES 220 24T PWR" + case NDPChassisSMBBES22048TPWR: + s = "SMB BES 220 48T PWR" + case NDPChassisOME6500: + s = "OME 6500" + case NDPChassisEthernetRoutingSwitch4548GT: + s = "Ethernet Routing Switch 4548GT" + case NDPChassisEthernetRoutingSwitch4548GTPWR: + s = "Ethernet Routing Switch 4548GT-PWR" + case NDPChassisEthernetRoutingSwitch4550T: + s = "Ethernet Routing Switch 4550T" + case NDPChassisEthernetRoutingSwitch4550TPWR: + s = "Ethernet Routing Switch 4550T-PWR" + case NDPChassisEthernetRoutingSwitch4526FX: + s = "Ethernet Routing Switch 4526FX" + case NDPChassisEthernetRoutingSwitch250026T: + s = "Ethernet Routing Switch 2500-26T" + case NDPChassisEthernetRoutingSwitch250026TPWR: + s = "Ethernet Routing Switch 2500-26T-PWR" + case NDPChassisEthernetRoutingSwitch250050T: + s = "Ethernet Routing Switch 2500-50T" + case NDPChassisEthernetRoutingSwitch250050TPWR: + s = "Ethernet Routing Switch 2500-50T-PWR" + default: + s = "Unknown" + } + return +} + +func (t NDPBackplaneType) String() (s string) { + switch t { + case NDPBackplaneOther: + s = "Other" + case NDPBackplaneEthernet: + s = "Ethernet" + case NDPBackplaneEthernetTokenring: + s = "Ethernet and Tokenring" + case NDPBackplaneEthernetFDDI: + s = "Ethernet and FDDI" + case NDPBackplaneEthernetTokenringFDDI: + s = "Ethernet, Tokenring and FDDI" + case NDPBackplaneEthernetTokenringRedundantPower: + s = "Ethernet and Tokenring with redundant power" + case NDPBackplaneEthernetTokenringFDDIRedundantPower: + s = "Ethernet, Tokenring, FDDI with redundant power" + case NDPBackplaneTokenRing: + s = "Token Ring" + case NDPBackplaneEthernetTokenringFastEthernet: + s = "Ethernet, Tokenring and Fast Ethernet" + case NDPBackplaneEthernetFastEthernet: + s = "Ethernet and Fast Ethernet" + case NDPBackplaneEthernetTokenringFastEthernetRedundantPower: + s = "Ethernet, Tokenring, Fast Ethernet with redundant power" + case NDPBackplaneEthernetFastEthernetGigabitEthernet: + s = "Ethernet, Fast Ethernet and Gigabit Ethernet" + default: + s = "Unknown" + } + return +} + +func (t NDPState) String() (s string) { + switch t { + case NDPStateTopology: + s = "Topology Change" + case NDPStateHeartbeat: + s = "Heartbeat" + case NDPStateNew: + s = "New" + default: + s = "Unknown" + } + return +} diff --git a/vendor/github.com/google/gopacket/layers/ntp.go b/vendor/github.com/google/gopacket/layers/ntp.go new file mode 100644 index 0000000000..33c15b3b39 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/ntp.go @@ -0,0 +1,416 @@ +// Copyright 2016 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. 
+// +//****************************************************************************** + +package layers + +import ( + "encoding/binary" + "errors" + + "github.com/google/gopacket" +) + +//****************************************************************************** +// +// Network Time Protocol (NTP) Decoding Layer +// ------------------------------------------ +// This file provides a GoPacket decoding layer for NTP. +// +//****************************************************************************** +// +// About The Network Time Protocol (NTP) +// ------------------------------------- +// NTP is a protocol that enables computers on the internet to set their +// clocks to the correct time (or to a time that is acceptably close to the +// correct time). NTP runs on top of UDP. +// +// There have been a series of versions of the NTP protocol. The latest +// version is V4 and is specified in RFC 5905: +// http://www.ietf.org/rfc/rfc5905.txt +// +//****************************************************************************** +// +// References +// ---------- +// +// Wikipedia's NTP entry: +// https://en.wikipedia.org/wiki/Network_Time_Protocol +// This is the best place to get an overview of NTP. +// +// Network Time Protocol Home Website: +// http://www.ntp.org/ +// This appears to be the official website of NTP. +// +// List of current NTP Protocol RFCs: +// http://www.ntp.org/rfc.html +// +// RFC 958: "Network Time Protocol (NTP)" (1985) +// https://tools.ietf.org/html/rfc958 +// This is the original NTP specification. +// +// RFC 1305: "Network Time Protocol (Version 3) Specification, Implementation and Analysis" (1992) +// https://tools.ietf.org/html/rfc1305 +// The protocol was updated in 1992 yielding NTP V3. +// +// RFC 5905: "Network Time Protocol Version 4: Protocol and Algorithms Specification" (2010) +// https://www.ietf.org/rfc/rfc5905.txt +// The protocol was updated in 2010 yielding NTP V4. +// V4 is backwards compatible with all previous versions of NTP. +// +// RFC 5906: "Network Time Protocol Version 4: Autokey Specification" +// https://tools.ietf.org/html/rfc5906 +// This document addresses the security of the NTP protocol +// and is probably not relevant to this package. +// +// RFC 5907: "Definitions of Managed Objects for Network Time Protocol Version 4 (NTPv4)" +// https://tools.ietf.org/html/rfc5907 +// This document addresses the management of NTP servers and +// is probably not relevant to this package. +// +// RFC 5908: "Network Time Protocol (NTP) Server Option for DHCPv6" +// https://tools.ietf.org/html/rfc5908 +// This document addresses the use of NTP in DHCPv6 and is +// probably not relevant to this package. +// +// "Let's make a NTP Client in C" +// https://lettier.github.io/posts/2016-04-26-lets-make-a-ntp-client-in-c.html +// This web page contains useful information about the details of NTP, +// including an NTP record struture in C, and C code. +// +// "NTP Packet Header (NTP Reference Implementation) (Computer Network Time Synchronization)" +// http://what-when-how.com/computer-network-time-synchronization/ +// ntp-packet-header-ntp-reference-implementation-computer-network-time-synchronization/ +// This web page contains useful information on the details of NTP. +// +// "Technical information - NTP Data Packet" +// https://www.meinbergglobal.com/english/info/ntp-packet.htm +// This page has a helpful diagram of an NTP V4 packet. 
+// +//****************************************************************************** +// +// Obsolete References +// ------------------- +// +// RFC 1119: "RFC-1119 "Network Time Protocol (Version 2) Specification and Implementation" (1989) +// https://tools.ietf.org/html/rfc1119 +// Version 2 was drafted in 1989. +// It is unclear whether V2 was ever implememented or whether the +// ideas ended up in V3 (which was implemented in 1992). +// +// RFC 1361: "Simple Network Time Protocol (SNTP)" +// https://tools.ietf.org/html/rfc1361 +// This document is obsoleted by RFC 1769 and is included only for completeness. +// +// RFC 1769: "Simple Network Time Protocol (SNTP)" +// https://tools.ietf.org/html/rfc1769 +// This document is obsoleted by RFC 2030 and RFC 4330 and is included only for completeness. +// +// RFC 2030: "Simple Network Time Protocol (SNTP) Version 4 for IPv4, IPv6 and OSI" +// https://tools.ietf.org/html/rfc2030 +// This document is obsoleted by RFC 4330 and is included only for completeness. +// +// RFC 4330: "Simple Network Time Protocol (SNTP) Version 4 for IPv4, IPv6 and OSI" +// https://tools.ietf.org/html/rfc4330 +// This document is obsoleted by RFC 5905 and is included only for completeness. +// +//****************************************************************************** +// +// Endian And Bit Numbering Issues +// ------------------------------- +// +// Endian and bit numbering issues can be confusing. Here is some +// clarification: +// +// ENDIAN: Values are sent big endian. +// https://en.wikipedia.org/wiki/Endianness +// +// BIT NUMBERING: Bits are numbered 0 upwards from the most significant +// bit to the least significant bit. This means that if there is a 32-bit +// value, the most significant bit is called bit 0 and the least +// significant bit is called bit 31. +// +// See RFC 791 Appendix B for more discussion. +// +//****************************************************************************** +// +// NTP V3 and V4 Packet Format +// --------------------------- +// NTP packets are UDP packets whose payload contains an NTP record. +// +// The NTP RFC defines the format of the NTP record. +// +// There have been four versions of the protocol: +// +// V1 in 1985 +// V2 in 1989 +// V3 in 1992 +// V4 in 2010 +// +// It is clear that V1 and V2 are obsolete, and there is no need to +// cater for these formats. +// +// V3 and V4 essentially use the same format, with V4 adding some optional +// fields on the end. So this package supports the V3 and V4 formats. 
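To make the bit-numbering convention described above concrete: the first header byte packs LI (bits 0-1), VN (bits 2-4) and Mode (bits 5-7), which is exactly what the masks in DecodeFromBytes further down extract. A small self-contained sketch, using a hypothetical first byte of 0x1B:

package main

import "fmt"

func main() {
	// 0x1B = 00 011 011: leap indicator 0, version 3, mode 3 (client).
	f := byte(0x1B)
	li := (f & 0xC0) >> 6 // bits 0-1 (most significant)
	vn := (f & 0x38) >> 3 // bits 2-4
	mode := f & 0x07      // bits 5-7 (least significant)
	fmt.Println(li, vn, mode) // prints: 0 3 3
}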
+// +// The current version of NTP (NTP V4)'s RFC (V4 - RFC 5905) contains +// the following diagram for the NTP record format: + +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// |LI | VN |Mode | Stratum | Poll | Precision | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Root Delay | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Root Dispersion | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Reference ID | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// + Reference Timestamp (64) + +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// + Origin Timestamp (64) + +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// + Receive Timestamp (64) + +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// + Transmit Timestamp (64) + +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// . . +// . Extension Field 1 (variable) . +// . . +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// . . +// . Extension Field 2 (variable) . +// . . +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Key Identifier | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// | dgst (128) | +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// From http://www.ietf.org/rfc/rfc5905.txt +// +// The fields "Extension Field 1 (variable)" and later are optional fields, +// and so we can set a minimum NTP record size of 48 bytes. +// +const ntpMinimumRecordSizeInBytes int = 48 + +//****************************************************************************** + +// NTP Type +// -------- +// Type NTP implements the DecodingLayer interface. Each NTP object +// represents in a structured form the NTP record present as the UDP +// payload in an NTP UDP packet. +// + +type NTPLeapIndicator uint8 +type NTPVersion uint8 +type NTPMode uint8 +type NTPStratum uint8 +type NTPLog2Seconds int8 +type NTPFixed16Seconds uint32 +type NTPReferenceID uint32 +type NTPTimestamp uint64 + +type NTP struct { + BaseLayer // Stores the packet bytes and payload bytes. + + LeapIndicator NTPLeapIndicator // [0,3]. Indicates whether leap second(s) is to be added. + Version NTPVersion // [0,7]. Version of the NTP protocol. + Mode NTPMode // [0,7]. Mode. + Stratum NTPStratum // [0,255]. Stratum of time server in the server tree. + Poll NTPLog2Seconds // [-128,127]. The maximum interval between successive messages, in log2 seconds. + Precision NTPLog2Seconds // [-128,127]. The precision of the system clock, in log2 seconds. + RootDelay NTPFixed16Seconds // [0,2^32-1]. Total round trip delay to the reference clock in seconds times 2^16. + RootDispersion NTPFixed16Seconds // [0,2^32-1]. Total dispersion to the reference clock, in seconds times 2^16. + ReferenceID NTPReferenceID // ID code of reference clock [0,2^32-1]. + ReferenceTimestamp NTPTimestamp // Most recent timestamp from the reference clock. + OriginTimestamp NTPTimestamp // Local time when request was sent from local host. + ReceiveTimestamp NTPTimestamp // Local time (on server) that request arrived at server host. + TransmitTimestamp NTPTimestamp // Local time (on server) that request departed server host. 
+ + // FIX: This package should analyse the extension fields and represent the extension fields too. + ExtensionBytes []byte // Just put extensions in a byte slice. +} + +//****************************************************************************** + +// LayerType returns the layer type of the NTP object, which is LayerTypeNTP. +func (d *NTP) LayerType() gopacket.LayerType { + return LayerTypeNTP +} + +//****************************************************************************** + +// decodeNTP analyses a byte slice and attempts to decode it as an NTP +// record of a UDP packet. +// +// If it succeeds, it loads p with information about the packet and returns nil. +// If it fails, it returns an error (non nil). +// +// This function is employed in layertypes.go to register the NTP layer. +func decodeNTP(data []byte, p gopacket.PacketBuilder) error { + + // Attempt to decode the byte slice. + d := &NTP{} + err := d.DecodeFromBytes(data, p) + if err != nil { + return err + } + + // If the decoding worked, add the layer to the packet and set it + // as the application layer too, if there isn't already one. + p.AddLayer(d) + p.SetApplicationLayer(d) + + return nil +} + +//****************************************************************************** + +// DecodeFromBytes analyses a byte slice and attempts to decode it as an NTP +// record of a UDP packet. +// +// Upon succeeds, it loads the NTP object with information about the packet +// and returns nil. +// Upon failure, it returns an error (non nil). +func (d *NTP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + + // If the data block is too short to be a NTP record, then return an error. + if len(data) < ntpMinimumRecordSizeInBytes { + df.SetTruncated() + return errors.New("NTP packet too short") + } + + // RFC 5905 does not appear to define a maximum NTP record length. + // The protocol allows "extension fields" to be included in the record, + // and states about these fields:" + // + // "While the minimum field length containing required fields is + // four words (16 octets), a maximum field length remains to be + // established." + // + // For this reason, the packet length is not checked here for being too long. + + // NTP type embeds type BaseLayer which contains two fields: + // Contents is supposed to contain the bytes of the data at this level. + // Payload is supposed to contain the payload of this level. + // Here we set the baselayer to be the bytes of the NTP record. + d.BaseLayer = BaseLayer{Contents: data[:len(data)]} + + // Extract the fields from the block of bytes. + // To make sense of this, refer to the packet diagram + // above and the section on endian conventions. + + // The first few fields are all packed into the first 32 bits. Unpack them. + f := data[0] + d.LeapIndicator = NTPLeapIndicator((f & 0xC0) >> 6) + d.Version = NTPVersion((f & 0x38) >> 3) + d.Mode = NTPMode(f & 0x07) + d.Stratum = NTPStratum(data[1]) + d.Poll = NTPLog2Seconds(data[2]) + d.Precision = NTPLog2Seconds(data[3]) + + // The remaining fields can just be copied in big endian order. 
+ d.RootDelay = NTPFixed16Seconds(binary.BigEndian.Uint32(data[4:8])) + d.RootDispersion = NTPFixed16Seconds(binary.BigEndian.Uint32(data[8:12])) + d.ReferenceID = NTPReferenceID(binary.BigEndian.Uint32(data[12:16])) + d.ReferenceTimestamp = NTPTimestamp(binary.BigEndian.Uint64(data[16:24])) + d.OriginTimestamp = NTPTimestamp(binary.BigEndian.Uint64(data[24:32])) + d.ReceiveTimestamp = NTPTimestamp(binary.BigEndian.Uint64(data[32:40])) + d.TransmitTimestamp = NTPTimestamp(binary.BigEndian.Uint64(data[40:48])) + + // This layer does not attempt to analyse the extension bytes. + // But if there are any, we'd like the user to know. So we just + // place them all in an ExtensionBytes field. + d.ExtensionBytes = data[48:] + + // Return no error. + return nil +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (d *NTP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + data, err := b.PrependBytes(ntpMinimumRecordSizeInBytes) + if err != nil { + return err + } + + // Pack the first few fields into the first 32 bits. + h := uint8(0) + h |= (uint8(d.LeapIndicator) << 6) & 0xC0 + h |= (uint8(d.Version) << 3) & 0x38 + h |= (uint8(d.Mode)) & 0x07 + data[0] = byte(h) + data[1] = byte(d.Stratum) + data[2] = byte(d.Poll) + data[3] = byte(d.Precision) + + // The remaining fields can just be copied in big endian order. + binary.BigEndian.PutUint32(data[4:8], uint32(d.RootDelay)) + binary.BigEndian.PutUint32(data[8:12], uint32(d.RootDispersion)) + binary.BigEndian.PutUint32(data[12:16], uint32(d.ReferenceID)) + binary.BigEndian.PutUint64(data[16:24], uint64(d.ReferenceTimestamp)) + binary.BigEndian.PutUint64(data[24:32], uint64(d.OriginTimestamp)) + binary.BigEndian.PutUint64(data[32:40], uint64(d.ReceiveTimestamp)) + binary.BigEndian.PutUint64(data[40:48], uint64(d.TransmitTimestamp)) + + ex, err := b.AppendBytes(len(d.ExtensionBytes)) + if err != nil { + return err + } + copy(ex, d.ExtensionBytes) + + return nil +} + +//****************************************************************************** + +// CanDecode returns a set of layers that NTP objects can decode. +// As NTP objects can only decide the NTP layer, we can return just that layer. +// Apparently a single layer type implements LayerClass. +func (d *NTP) CanDecode() gopacket.LayerClass { + return LayerTypeNTP +} + +//****************************************************************************** + +// NextLayerType specifies the next layer that GoPacket should attempt to +// analyse after this (NTP) layer. As NTP packets do not contain any payload +// bytes, there are no further layers to analyse. +func (d *NTP) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypeZero +} + +//****************************************************************************** + +// NTP packets do not carry any data payload, so the empty byte slice is retured. +// In Go, a nil slice is functionally identical to an empty slice, so we +// return nil to avoid a heap allocation. 
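A usage sketch for the decoder above, assuming the vendored gopacket import path and a zeroed 48-byte placeholder record; in practice the input would be the UDP payload of a captured port-123 packet:

package main

import (
	"fmt"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	// Placeholder NTP record (all zeros); a real capture supplies the raw
	// UDP payload here.
	raw := make([]byte, 48)

	pkt := gopacket.NewPacket(raw, layers.LayerTypeNTP, gopacket.Default)
	if l := pkt.Layer(layers.LayerTypeNTP); l != nil {
		ntp := l.(*layers.NTP)
		fmt.Println(ntp.Version, ntp.Mode, ntp.Stratum, ntp.TransmitTimestamp)
	}
}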
+func (d *NTP) Payload() []byte { + return nil +} + +//****************************************************************************** +//* End Of NTP File * +//****************************************************************************** diff --git a/vendor/github.com/google/gopacket/layers/ospf.go b/vendor/github.com/google/gopacket/layers/ospf.go new file mode 100644 index 0000000000..4f5473d065 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/ospf.go @@ -0,0 +1,715 @@ +// Copyright 2017 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "errors" + "fmt" + + "github.com/google/gopacket" +) + +// OSPFType denotes what kind of OSPF type it is +type OSPFType uint8 + +// Potential values for OSPF.Type. +const ( + OSPFHello OSPFType = 1 + OSPFDatabaseDescription OSPFType = 2 + OSPFLinkStateRequest OSPFType = 3 + OSPFLinkStateUpdate OSPFType = 4 + OSPFLinkStateAcknowledgment OSPFType = 5 +) + +// LSA Function Codes for LSAheader.LSType +const ( + RouterLSAtypeV2 = 0x1 + RouterLSAtype = 0x2001 + NetworkLSAtypeV2 = 0x2 + NetworkLSAtype = 0x2002 + SummaryLSANetworktypeV2 = 0x3 + InterAreaPrefixLSAtype = 0x2003 + SummaryLSAASBRtypeV2 = 0x4 + InterAreaRouterLSAtype = 0x2004 + ASExternalLSAtypeV2 = 0x5 + ASExternalLSAtype = 0x4005 + NSSALSAtype = 0x2007 + NSSALSAtypeV2 = 0x7 + LinkLSAtype = 0x0008 + IntraAreaPrefixLSAtype = 0x2009 +) + +// String conversions for OSPFType +func (i OSPFType) String() string { + switch i { + case OSPFHello: + return "Hello" + case OSPFDatabaseDescription: + return "Database Description" + case OSPFLinkStateRequest: + return "Link State Request" + case OSPFLinkStateUpdate: + return "Link State Update" + case OSPFLinkStateAcknowledgment: + return "Link State Acknowledgment" + default: + return "" + } +} + +// Prefix extends IntraAreaPrefixLSA +type Prefix struct { + PrefixLength uint8 + PrefixOptions uint8 + Metric uint16 + AddressPrefix []byte +} + +// IntraAreaPrefixLSA is the struct from RFC 5340 A.4.10. +type IntraAreaPrefixLSA struct { + NumOfPrefixes uint16 + RefLSType uint16 + RefLinkStateID uint32 + RefAdvRouter uint32 + Prefixes []Prefix +} + +// LinkLSA is the struct from RFC 5340 A.4.9. +type LinkLSA struct { + RtrPriority uint8 + Options uint32 + LinkLocalAddress []byte + NumOfPrefixes uint32 + Prefixes []Prefix +} + +// ASExternalLSAV2 is the struct from RFC 2328 A.4.5. +type ASExternalLSAV2 struct { + NetworkMask uint32 + ExternalBit uint8 + Metric uint32 + ForwardingAddress uint32 + ExternalRouteTag uint32 +} + +// ASExternalLSA is the struct from RFC 5340 A.4.7. +type ASExternalLSA struct { + Flags uint8 + Metric uint32 + PrefixLength uint8 + PrefixOptions uint8 + RefLSType uint16 + AddressPrefix []byte + ForwardingAddress []byte + ExternalRouteTag uint32 + RefLinkStateID uint32 +} + +// InterAreaRouterLSA is the struct from RFC 5340 A.4.6. +type InterAreaRouterLSA struct { + Options uint32 + Metric uint32 + DestinationRouterID uint32 +} + +// InterAreaPrefixLSA is the struct from RFC 5340 A.4.5. +type InterAreaPrefixLSA struct { + Metric uint32 + PrefixLength uint8 + PrefixOptions uint8 + AddressPrefix []byte +} + +// NetworkLSA is the struct from RFC 5340 A.4.4. +type NetworkLSA struct { + Options uint32 + AttachedRouter []uint32 +} + +// NetworkLSAV2 is the struct from RFC 2328 A.4.3. 
+type NetworkLSAV2 struct { + NetworkMask uint32 + AttachedRouter []uint32 +} + +// RouterV2 extends RouterLSAV2 +type RouterV2 struct { + Type uint8 + LinkID uint32 + LinkData uint32 + Metric uint16 +} + +// RouterLSAV2 is the struct from RFC 2328 A.4.2. +type RouterLSAV2 struct { + Flags uint8 + Links uint16 + Routers []RouterV2 +} + +// Router extends RouterLSA +type Router struct { + Type uint8 + Metric uint16 + InterfaceID uint32 + NeighborInterfaceID uint32 + NeighborRouterID uint32 +} + +// RouterLSA is the struct from RFC 5340 A.4.3. +type RouterLSA struct { + Flags uint8 + Options uint32 + Routers []Router +} + +// LSAheader is the struct from RFC 5340 A.4.2 and RFC 2328 A.4.1. +type LSAheader struct { + LSAge uint16 + LSType uint16 + LinkStateID uint32 + AdvRouter uint32 + LSSeqNumber uint32 + LSChecksum uint16 + Length uint16 + LSOptions uint8 +} + +// LSA links LSAheader with the structs from RFC 5340 A.4. +type LSA struct { + LSAheader + Content interface{} +} + +// LSUpdate is the struct from RFC 5340 A.3.5. +type LSUpdate struct { + NumOfLSAs uint32 + LSAs []LSA +} + +// LSReq is the struct from RFC 5340 A.3.4. +type LSReq struct { + LSType uint16 + LSID uint32 + AdvRouter uint32 +} + +// DbDescPkg is the struct from RFC 5340 A.3.3. +type DbDescPkg struct { + Options uint32 + InterfaceMTU uint16 + Flags uint16 + DDSeqNumber uint32 + LSAinfo []LSAheader +} + +// HelloPkg is the struct from RFC 5340 A.3.2. +type HelloPkg struct { + InterfaceID uint32 + RtrPriority uint8 + Options uint32 + HelloInterval uint16 + RouterDeadInterval uint32 + DesignatedRouterID uint32 + BackupDesignatedRouterID uint32 + NeighborID []uint32 +} + +// HelloPkgV2 extends the HelloPkg struct with OSPFv2 information +type HelloPkgV2 struct { + HelloPkg + NetworkMask uint32 +} + +// OSPF is a basic OSPF packet header with common fields of Version 2 and Version 3. 
+type OSPF struct { + Version uint8 + Type OSPFType + PacketLength uint16 + RouterID uint32 + AreaID uint32 + Checksum uint16 + Content interface{} +} + +//OSPFv2 extend the OSPF head with version 2 specific fields +type OSPFv2 struct { + BaseLayer + OSPF + AuType uint16 + Authentication uint64 +} + +// OSPFv3 extend the OSPF head with version 3 specific fields +type OSPFv3 struct { + BaseLayer + OSPF + Instance uint8 + Reserved uint8 +} + +// getLSAsv2 parses the LSA information from the packet for OSPFv2 +func getLSAsv2(num uint32, data []byte) ([]LSA, error) { + var lsas []LSA + var i uint32 = 0 + var offset uint32 = 0 + for ; i < num; i++ { + lstype := uint16(data[offset+3]) + lsalength := binary.BigEndian.Uint16(data[offset+18 : offset+20]) + content, err := extractLSAInformation(lstype, lsalength, data[offset:]) + if err != nil { + return nil, fmt.Errorf("Could not extract Link State type.") + } + lsa := LSA{ + LSAheader: LSAheader{ + LSAge: binary.BigEndian.Uint16(data[offset : offset+2]), + LSOptions: data[offset+2], + LSType: lstype, + LinkStateID: binary.BigEndian.Uint32(data[offset+4 : offset+8]), + AdvRouter: binary.BigEndian.Uint32(data[offset+8 : offset+12]), + LSSeqNumber: binary.BigEndian.Uint32(data[offset+12 : offset+16]), + LSChecksum: binary.BigEndian.Uint16(data[offset+16 : offset+18]), + Length: lsalength, + }, + Content: content, + } + lsas = append(lsas, lsa) + offset += uint32(lsalength) + } + return lsas, nil +} + +// extractLSAInformation extracts all the LSA information +func extractLSAInformation(lstype, lsalength uint16, data []byte) (interface{}, error) { + if lsalength < 20 { + return nil, fmt.Errorf("Link State header length %v too short, %v required", lsalength, 20) + } + if len(data) < int(lsalength) { + return nil, fmt.Errorf("Link State header length %v too short, %v required", len(data), lsalength) + } + var content interface{} + switch lstype { + case RouterLSAtypeV2: + var routers []RouterV2 + var j uint32 + for j = 24; j < uint32(lsalength); j += 12 { + if len(data) < int(j+12) { + return nil, errors.New("LSAtypeV2 too small") + } + router := RouterV2{ + LinkID: binary.BigEndian.Uint32(data[j : j+4]), + LinkData: binary.BigEndian.Uint32(data[j+4 : j+8]), + Type: uint8(data[j+8]), + Metric: binary.BigEndian.Uint16(data[j+10 : j+12]), + } + routers = append(routers, router) + } + if len(data) < 24 { + return nil, errors.New("LSAtypeV2 too small") + } + links := binary.BigEndian.Uint16(data[22:24]) + content = RouterLSAV2{ + Flags: data[20], + Links: links, + Routers: routers, + } + case NSSALSAtypeV2: + fallthrough + case ASExternalLSAtypeV2: + content = ASExternalLSAV2{ + NetworkMask: binary.BigEndian.Uint32(data[20:24]), + ExternalBit: data[24] & 0x80, + Metric: binary.BigEndian.Uint32(data[24:28]) & 0x00FFFFFF, + ForwardingAddress: binary.BigEndian.Uint32(data[28:32]), + ExternalRouteTag: binary.BigEndian.Uint32(data[32:36]), + } + case NetworkLSAtypeV2: + var routers []uint32 + var j uint32 + for j = 24; j < uint32(lsalength); j += 4 { + routers = append(routers, binary.BigEndian.Uint32(data[j:j+4])) + } + content = NetworkLSAV2{ + NetworkMask: binary.BigEndian.Uint32(data[20:24]), + AttachedRouter: routers, + } + case RouterLSAtype: + var routers []Router + var j uint32 + for j = 24; j < uint32(lsalength); j += 16 { + router := Router{ + Type: uint8(data[j]), + Metric: binary.BigEndian.Uint16(data[j+2 : j+4]), + InterfaceID: binary.BigEndian.Uint32(data[j+4 : j+8]), + NeighborInterfaceID: binary.BigEndian.Uint32(data[j+8 : j+12]), + 
NeighborRouterID: binary.BigEndian.Uint32(data[j+12 : j+16]), + } + routers = append(routers, router) + } + content = RouterLSA{ + Flags: uint8(data[20]), + Options: binary.BigEndian.Uint32(data[20:24]) & 0x00FFFFFF, + Routers: routers, + } + case NetworkLSAtype: + var routers []uint32 + var j uint32 + for j = 24; j < uint32(lsalength); j += 4 { + routers = append(routers, binary.BigEndian.Uint32(data[j:j+4])) + } + content = NetworkLSA{ + Options: binary.BigEndian.Uint32(data[20:24]) & 0x00FFFFFF, + AttachedRouter: routers, + } + case InterAreaPrefixLSAtype: + content = InterAreaPrefixLSA{ + Metric: binary.BigEndian.Uint32(data[20:24]) & 0x00FFFFFF, + PrefixLength: uint8(data[24]), + PrefixOptions: uint8(data[25]), + AddressPrefix: data[28:uint32(lsalength)], + } + case InterAreaRouterLSAtype: + content = InterAreaRouterLSA{ + Options: binary.BigEndian.Uint32(data[20:24]) & 0x00FFFFFF, + Metric: binary.BigEndian.Uint32(data[24:28]) & 0x00FFFFFF, + DestinationRouterID: binary.BigEndian.Uint32(data[28:32]), + } + case ASExternalLSAtype: + fallthrough + case NSSALSAtype: + flags := uint8(data[20]) + prefixLen := uint8(data[24]) / 8 + var forwardingAddress []byte + if (flags & 0x02) == 0x02 { + forwardingAddress = data[28+uint32(prefixLen) : 28+uint32(prefixLen)+16] + } + content = ASExternalLSA{ + Flags: flags, + Metric: binary.BigEndian.Uint32(data[20:24]) & 0x00FFFFFF, + PrefixLength: prefixLen, + PrefixOptions: uint8(data[25]), + RefLSType: binary.BigEndian.Uint16(data[26:28]), + AddressPrefix: data[28 : 28+uint32(prefixLen)], + ForwardingAddress: forwardingAddress, + } + case LinkLSAtype: + var prefixes []Prefix + var prefixOffset uint32 = 44 + var j uint32 + numOfPrefixes := binary.BigEndian.Uint32(data[40:44]) + for j = 0; j < numOfPrefixes; j++ { + prefixLen := uint8(data[prefixOffset]) + prefix := Prefix{ + PrefixLength: prefixLen, + PrefixOptions: uint8(data[prefixOffset+1]), + AddressPrefix: data[prefixOffset+4 : prefixOffset+4+uint32(prefixLen)/8], + } + prefixes = append(prefixes, prefix) + prefixOffset = prefixOffset + 4 + uint32(prefixLen)/8 + } + content = LinkLSA{ + RtrPriority: uint8(data[20]), + Options: binary.BigEndian.Uint32(data[20:24]) & 0x00FFFFFF, + LinkLocalAddress: data[24:40], + NumOfPrefixes: numOfPrefixes, + Prefixes: prefixes, + } + case IntraAreaPrefixLSAtype: + var prefixes []Prefix + var prefixOffset uint32 = 32 + var j uint16 + numOfPrefixes := binary.BigEndian.Uint16(data[20:22]) + for j = 0; j < numOfPrefixes; j++ { + prefixLen := uint8(data[prefixOffset]) + prefix := Prefix{ + PrefixLength: prefixLen, + PrefixOptions: uint8(data[prefixOffset+1]), + Metric: binary.BigEndian.Uint16(data[prefixOffset+2 : prefixOffset+4]), + AddressPrefix: data[prefixOffset+4 : prefixOffset+4+uint32(prefixLen)/8], + } + prefixes = append(prefixes, prefix) + prefixOffset = prefixOffset + 4 + uint32(prefixLen) + } + content = IntraAreaPrefixLSA{ + NumOfPrefixes: numOfPrefixes, + RefLSType: binary.BigEndian.Uint16(data[22:24]), + RefLinkStateID: binary.BigEndian.Uint32(data[24:28]), + RefAdvRouter: binary.BigEndian.Uint32(data[28:32]), + Prefixes: prefixes, + } + default: + return nil, fmt.Errorf("Unknown Link State type.") + } + return content, nil +} + +// getLSAs parses the LSA information from the packet for OSPFv3 +func getLSAs(num uint32, data []byte) ([]LSA, error) { + var lsas []LSA + var i uint32 = 0 + var offset uint32 = 0 + for ; i < num; i++ { + var content interface{} + lstype := binary.BigEndian.Uint16(data[offset+2 : offset+4]) + lsalength := 
binary.BigEndian.Uint16(data[offset+18 : offset+20]) + + content, err := extractLSAInformation(lstype, lsalength, data[offset:]) + if err != nil { + return nil, fmt.Errorf("Could not extract Link State type.") + } + lsa := LSA{ + LSAheader: LSAheader{ + LSAge: binary.BigEndian.Uint16(data[offset : offset+2]), + LSType: lstype, + LinkStateID: binary.BigEndian.Uint32(data[offset+4 : offset+8]), + AdvRouter: binary.BigEndian.Uint32(data[offset+8 : offset+12]), + LSSeqNumber: binary.BigEndian.Uint32(data[offset+12 : offset+16]), + LSChecksum: binary.BigEndian.Uint16(data[offset+16 : offset+18]), + Length: lsalength, + }, + Content: content, + } + lsas = append(lsas, lsa) + offset += uint32(lsalength) + } + return lsas, nil +} + +// DecodeFromBytes decodes the given bytes into the OSPF layer. +func (ospf *OSPFv2) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 24 { + return fmt.Errorf("Packet too smal for OSPF Version 2") + } + + ospf.Version = uint8(data[0]) + ospf.Type = OSPFType(data[1]) + ospf.PacketLength = binary.BigEndian.Uint16(data[2:4]) + ospf.RouterID = binary.BigEndian.Uint32(data[4:8]) + ospf.AreaID = binary.BigEndian.Uint32(data[8:12]) + ospf.Checksum = binary.BigEndian.Uint16(data[12:14]) + ospf.AuType = binary.BigEndian.Uint16(data[14:16]) + ospf.Authentication = binary.BigEndian.Uint64(data[16:24]) + + switch ospf.Type { + case OSPFHello: + var neighbors []uint32 + for i := 44; uint16(i+4) <= ospf.PacketLength; i += 4 { + neighbors = append(neighbors, binary.BigEndian.Uint32(data[i:i+4])) + } + ospf.Content = HelloPkgV2{ + NetworkMask: binary.BigEndian.Uint32(data[24:28]), + HelloPkg: HelloPkg{ + HelloInterval: binary.BigEndian.Uint16(data[28:30]), + Options: uint32(data[30]), + RtrPriority: uint8(data[31]), + RouterDeadInterval: binary.BigEndian.Uint32(data[32:36]), + DesignatedRouterID: binary.BigEndian.Uint32(data[36:40]), + BackupDesignatedRouterID: binary.BigEndian.Uint32(data[40:44]), + NeighborID: neighbors, + }, + } + case OSPFDatabaseDescription: + var lsas []LSAheader + for i := 32; uint16(i+20) <= ospf.PacketLength; i += 20 { + lsa := LSAheader{ + LSAge: binary.BigEndian.Uint16(data[i : i+2]), + LSType: binary.BigEndian.Uint16(data[i+2 : i+4]), + LinkStateID: binary.BigEndian.Uint32(data[i+4 : i+8]), + AdvRouter: binary.BigEndian.Uint32(data[i+8 : i+12]), + LSSeqNumber: binary.BigEndian.Uint32(data[i+12 : i+16]), + LSChecksum: binary.BigEndian.Uint16(data[i+16 : i+18]), + Length: binary.BigEndian.Uint16(data[i+18 : i+20]), + } + lsas = append(lsas, lsa) + } + ospf.Content = DbDescPkg{ + InterfaceMTU: binary.BigEndian.Uint16(data[24:26]), + Options: uint32(data[26]), + Flags: uint16(data[27]), + DDSeqNumber: binary.BigEndian.Uint32(data[28:32]), + LSAinfo: lsas, + } + case OSPFLinkStateRequest: + var lsrs []LSReq + for i := 24; uint16(i+12) <= ospf.PacketLength; i += 12 { + lsr := LSReq{ + LSType: binary.BigEndian.Uint16(data[i+2 : i+4]), + LSID: binary.BigEndian.Uint32(data[i+4 : i+8]), + AdvRouter: binary.BigEndian.Uint32(data[i+8 : i+12]), + } + lsrs = append(lsrs, lsr) + } + ospf.Content = lsrs + case OSPFLinkStateUpdate: + num := binary.BigEndian.Uint32(data[24:28]) + + lsas, err := getLSAsv2(num, data[28:]) + if err != nil { + return fmt.Errorf("Cannot parse Link State Update packet: %v", err) + } + ospf.Content = LSUpdate{ + NumOfLSAs: num, + LSAs: lsas, + } + case OSPFLinkStateAcknowledgment: + var lsas []LSAheader + for i := 24; uint16(i+20) <= ospf.PacketLength; i += 20 { + lsa := LSAheader{ + LSAge: 
binary.BigEndian.Uint16(data[i : i+2]), + LSOptions: data[i+2], + LSType: uint16(data[i+3]), + LinkStateID: binary.BigEndian.Uint32(data[i+4 : i+8]), + AdvRouter: binary.BigEndian.Uint32(data[i+8 : i+12]), + LSSeqNumber: binary.BigEndian.Uint32(data[i+12 : i+16]), + LSChecksum: binary.BigEndian.Uint16(data[i+16 : i+18]), + Length: binary.BigEndian.Uint16(data[i+18 : i+20]), + } + lsas = append(lsas, lsa) + } + ospf.Content = lsas + } + return nil +} + +// DecodeFromBytes decodes the given bytes into the OSPF layer. +func (ospf *OSPFv3) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + + if len(data) < 16 { + return fmt.Errorf("Packet too smal for OSPF Version 3") + } + + ospf.Version = uint8(data[0]) + ospf.Type = OSPFType(data[1]) + ospf.PacketLength = binary.BigEndian.Uint16(data[2:4]) + ospf.RouterID = binary.BigEndian.Uint32(data[4:8]) + ospf.AreaID = binary.BigEndian.Uint32(data[8:12]) + ospf.Checksum = binary.BigEndian.Uint16(data[12:14]) + ospf.Instance = uint8(data[14]) + ospf.Reserved = uint8(data[15]) + + switch ospf.Type { + case OSPFHello: + var neighbors []uint32 + for i := 36; uint16(i+4) <= ospf.PacketLength; i += 4 { + neighbors = append(neighbors, binary.BigEndian.Uint32(data[i:i+4])) + } + ospf.Content = HelloPkg{ + InterfaceID: binary.BigEndian.Uint32(data[16:20]), + RtrPriority: uint8(data[20]), + Options: binary.BigEndian.Uint32(data[21:25]) >> 8, + HelloInterval: binary.BigEndian.Uint16(data[24:26]), + RouterDeadInterval: uint32(binary.BigEndian.Uint16(data[26:28])), + DesignatedRouterID: binary.BigEndian.Uint32(data[28:32]), + BackupDesignatedRouterID: binary.BigEndian.Uint32(data[32:36]), + NeighborID: neighbors, + } + case OSPFDatabaseDescription: + var lsas []LSAheader + for i := 28; uint16(i+20) <= ospf.PacketLength; i += 20 { + lsa := LSAheader{ + LSAge: binary.BigEndian.Uint16(data[i : i+2]), + LSType: binary.BigEndian.Uint16(data[i+2 : i+4]), + LinkStateID: binary.BigEndian.Uint32(data[i+4 : i+8]), + AdvRouter: binary.BigEndian.Uint32(data[i+8 : i+12]), + LSSeqNumber: binary.BigEndian.Uint32(data[i+12 : i+16]), + LSChecksum: binary.BigEndian.Uint16(data[i+16 : i+18]), + Length: binary.BigEndian.Uint16(data[i+18 : i+20]), + } + lsas = append(lsas, lsa) + } + ospf.Content = DbDescPkg{ + Options: binary.BigEndian.Uint32(data[16:20]) & 0x00FFFFFF, + InterfaceMTU: binary.BigEndian.Uint16(data[20:22]), + Flags: binary.BigEndian.Uint16(data[22:24]), + DDSeqNumber: binary.BigEndian.Uint32(data[24:28]), + LSAinfo: lsas, + } + case OSPFLinkStateRequest: + var lsrs []LSReq + for i := 16; uint16(i+12) <= ospf.PacketLength; i += 12 { + lsr := LSReq{ + LSType: binary.BigEndian.Uint16(data[i+2 : i+4]), + LSID: binary.BigEndian.Uint32(data[i+4 : i+8]), + AdvRouter: binary.BigEndian.Uint32(data[i+8 : i+12]), + } + lsrs = append(lsrs, lsr) + } + ospf.Content = lsrs + case OSPFLinkStateUpdate: + num := binary.BigEndian.Uint32(data[16:20]) + lsas, err := getLSAs(num, data[20:]) + if err != nil { + return fmt.Errorf("Cannot parse Link State Update packet: %v", err) + } + ospf.Content = LSUpdate{ + NumOfLSAs: num, + LSAs: lsas, + } + + case OSPFLinkStateAcknowledgment: + var lsas []LSAheader + for i := 16; uint16(i+20) <= ospf.PacketLength; i += 20 { + lsa := LSAheader{ + LSAge: binary.BigEndian.Uint16(data[i : i+2]), + LSType: binary.BigEndian.Uint16(data[i+2 : i+4]), + LinkStateID: binary.BigEndian.Uint32(data[i+4 : i+8]), + AdvRouter: binary.BigEndian.Uint32(data[i+8 : i+12]), + LSSeqNumber: binary.BigEndian.Uint32(data[i+12 : i+16]), + LSChecksum: 
binary.BigEndian.Uint16(data[i+16 : i+18]), + Length: binary.BigEndian.Uint16(data[i+18 : i+20]), + } + lsas = append(lsas, lsa) + } + ospf.Content = lsas + default: + } + + return nil +} + +// LayerType returns LayerTypeOSPF +func (ospf *OSPFv2) LayerType() gopacket.LayerType { + return LayerTypeOSPF +} +func (ospf *OSPFv3) LayerType() gopacket.LayerType { + return LayerTypeOSPF +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (ospf *OSPFv2) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypeZero +} +func (ospf *OSPFv3) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypeZero +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (ospf *OSPFv2) CanDecode() gopacket.LayerClass { + return LayerTypeOSPF +} +func (ospf *OSPFv3) CanDecode() gopacket.LayerClass { + return LayerTypeOSPF +} + +func decodeOSPF(data []byte, p gopacket.PacketBuilder) error { + if len(data) < 14 { + return fmt.Errorf("Packet too smal for OSPF") + } + + switch uint8(data[0]) { + case 2: + ospf := &OSPFv2{} + return decodingLayerDecoder(ospf, data, p) + case 3: + ospf := &OSPFv3{} + return decodingLayerDecoder(ospf, data, p) + default: + } + + return fmt.Errorf("Unable to determine OSPF type.") +} diff --git a/vendor/github.com/google/gopacket/layers/pflog.go b/vendor/github.com/google/gopacket/layers/pflog.go new file mode 100644 index 0000000000..9dbbd90d15 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/pflog.go @@ -0,0 +1,84 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "errors" + "fmt" + + "github.com/google/gopacket" +) + +type PFDirection uint8 + +const ( + PFDirectionInOut PFDirection = 0 + PFDirectionIn PFDirection = 1 + PFDirectionOut PFDirection = 2 +) + +// PFLog provides the layer for 'pf' packet-filter logging, as described at +// http://www.freebsd.org/cgi/man.cgi?query=pflog&sektion=4 +type PFLog struct { + BaseLayer + Length uint8 + Family ProtocolFamily + Action, Reason uint8 + IFName, Ruleset []byte + RuleNum, SubruleNum uint32 + UID uint32 + PID int32 + RuleUID uint32 + RulePID int32 + Direction PFDirection + // The remainder is padding +} + +func (pf *PFLog) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 60 { + df.SetTruncated() + return errors.New("PFLog data less than 60 bytes") + } + pf.Length = data[0] + pf.Family = ProtocolFamily(data[1]) + pf.Action = data[2] + pf.Reason = data[3] + pf.IFName = data[4:20] + pf.Ruleset = data[20:36] + pf.RuleNum = binary.BigEndian.Uint32(data[36:40]) + pf.SubruleNum = binary.BigEndian.Uint32(data[40:44]) + pf.UID = binary.BigEndian.Uint32(data[44:48]) + pf.PID = int32(binary.BigEndian.Uint32(data[48:52])) + pf.RuleUID = binary.BigEndian.Uint32(data[52:56]) + pf.RulePID = int32(binary.BigEndian.Uint32(data[56:60])) + pf.Direction = PFDirection(data[60]) + if pf.Length%4 != 1 { + return errors.New("PFLog header length should be 3 less than multiple of 4") + } + actualLength := int(pf.Length) + 3 + if len(data) < actualLength { + return fmt.Errorf("PFLog data size < %d", actualLength) + } + pf.Contents = data[:actualLength] + pf.Payload = data[actualLength:] + return nil +} + +// LayerType returns layers.LayerTypePFLog +func (pf *PFLog) LayerType() gopacket.LayerType { return LayerTypePFLog } + +func (pf *PFLog) 
CanDecode() gopacket.LayerClass { return LayerTypePFLog } + +func (pf *PFLog) NextLayerType() gopacket.LayerType { + return pf.Family.LayerType() +} + +func decodePFLog(data []byte, p gopacket.PacketBuilder) error { + pf := &PFLog{} + return decodingLayerDecoder(pf, data, p) +} diff --git a/vendor/github.com/google/gopacket/layers/ports.go b/vendor/github.com/google/gopacket/layers/ports.go new file mode 100644 index 0000000000..1e3f42efc2 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/ports.go @@ -0,0 +1,156 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "fmt" + "strconv" + + "github.com/google/gopacket" +) + +// TCPPort is a port in a TCP layer. +type TCPPort uint16 + +// UDPPort is a port in a UDP layer. +type UDPPort uint16 + +// RUDPPort is a port in a RUDP layer. +type RUDPPort uint8 + +// SCTPPort is a port in a SCTP layer. +type SCTPPort uint16 + +// UDPLitePort is a port in a UDPLite layer. +type UDPLitePort uint16 + +// RUDPPortNames contains the string names for all RUDP ports. +var RUDPPortNames = map[RUDPPort]string{} + +// UDPLitePortNames contains the string names for all UDPLite ports. +var UDPLitePortNames = map[UDPLitePort]string{} + +// {TCP,UDP,SCTP}PortNames can be found in iana_ports.go + +// String returns the port as "number(name)" if there's a well-known port name, +// or just "number" if there isn't. Well-known names are stored in +// TCPPortNames. +func (a TCPPort) String() string { + if name, ok := TCPPortNames[a]; ok { + return fmt.Sprintf("%d(%s)", a, name) + } + return strconv.Itoa(int(a)) +} + +// LayerType returns a LayerType that would be able to decode the +// application payload. It uses some well-known ports such as 53 for +// DNS. +// +// Returns gopacket.LayerTypePayload for unknown/unsupported port numbers. +func (a TCPPort) LayerType() gopacket.LayerType { + lt := tcpPortLayerType[uint16(a)] + if lt != 0 { + return lt + } + return gopacket.LayerTypePayload +} + +var tcpPortLayerType = [65536]gopacket.LayerType{ + 53: LayerTypeDNS, + 443: LayerTypeTLS, // https + 502: LayerTypeModbusTCP, // modbustcp + 636: LayerTypeTLS, // ldaps + 989: LayerTypeTLS, // ftps-data + 990: LayerTypeTLS, // ftps + 992: LayerTypeTLS, // telnets + 993: LayerTypeTLS, // imaps + 994: LayerTypeTLS, // ircs + 995: LayerTypeTLS, // pop3s + 5061: LayerTypeTLS, // ips +} + +// RegisterTCPPortLayerType creates a new mapping between a TCPPort +// and an underlaying LayerType. +func RegisterTCPPortLayerType(port TCPPort, layerType gopacket.LayerType) { + tcpPortLayerType[port] = layerType +} + +// String returns the port as "number(name)" if there's a well-known port name, +// or just "number" if there isn't. Well-known names are stored in +// UDPPortNames. +func (a UDPPort) String() string { + if name, ok := UDPPortNames[a]; ok { + return fmt.Sprintf("%d(%s)", a, name) + } + return strconv.Itoa(int(a)) +} + +// LayerType returns a LayerType that would be able to decode the +// application payload. It uses some well-known ports such as 53 for +// DNS. +// +// Returns gopacket.LayerTypePayload for unknown/unsupported port numbers. 
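RegisterTCPPortLayerType above (and its UDP counterpart just below) lets callers extend these port tables at runtime, so traffic on a non-standard port is handed to the right decoder instead of falling back to gopacket.LayerTypePayload. A sketch, using an arbitrary example port:

package main

import "github.com/google/gopacket/layers"

func main() {
	// Suppose a service speaks DNS on port 5353 (the port number is purely
	// illustrative); after registration, TCPPort.LayerType and
	// UDPPort.LayerType route such payloads to the DNS decoder.
	layers.RegisterTCPPortLayerType(layers.TCPPort(5353), layers.LayerTypeDNS)
	layers.RegisterUDPPortLayerType(layers.UDPPort(5353), layers.LayerTypeDNS)
}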
+func (a UDPPort) LayerType() gopacket.LayerType { + lt := udpPortLayerType[uint16(a)] + if lt != 0 { + return lt + } + return gopacket.LayerTypePayload +} + +var udpPortLayerType = [65536]gopacket.LayerType{ + 53: LayerTypeDNS, + 123: LayerTypeNTP, + 4789: LayerTypeVXLAN, + 67: LayerTypeDHCPv4, + 68: LayerTypeDHCPv4, + 546: LayerTypeDHCPv6, + 547: LayerTypeDHCPv6, + 5060: LayerTypeSIP, + 6343: LayerTypeSFlow, + 6081: LayerTypeGeneve, + 3784: LayerTypeBFD, + 2152: LayerTypeGTPv1U, + 623: LayerTypeRMCP, + 1812: LayerTypeRADIUS, +} + +// RegisterUDPPortLayerType creates a new mapping between a UDPPort +// and an underlaying LayerType. +func RegisterUDPPortLayerType(port UDPPort, layerType gopacket.LayerType) { + udpPortLayerType[port] = layerType +} + +// String returns the port as "number(name)" if there's a well-known port name, +// or just "number" if there isn't. Well-known names are stored in +// RUDPPortNames. +func (a RUDPPort) String() string { + if name, ok := RUDPPortNames[a]; ok { + return fmt.Sprintf("%d(%s)", a, name) + } + return strconv.Itoa(int(a)) +} + +// String returns the port as "number(name)" if there's a well-known port name, +// or just "number" if there isn't. Well-known names are stored in +// SCTPPortNames. +func (a SCTPPort) String() string { + if name, ok := SCTPPortNames[a]; ok { + return fmt.Sprintf("%d(%s)", a, name) + } + return strconv.Itoa(int(a)) +} + +// String returns the port as "number(name)" if there's a well-known port name, +// or just "number" if there isn't. Well-known names are stored in +// UDPLitePortNames. +func (a UDPLitePort) String() string { + if name, ok := UDPLitePortNames[a]; ok { + return fmt.Sprintf("%d(%s)", a, name) + } + return strconv.Itoa(int(a)) +} diff --git a/vendor/github.com/google/gopacket/layers/ppp.go b/vendor/github.com/google/gopacket/layers/ppp.go new file mode 100644 index 0000000000..e534d698cb --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/ppp.go @@ -0,0 +1,88 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "errors" + "github.com/google/gopacket" +) + +// PPP is the layer for PPP encapsulation headers. +type PPP struct { + BaseLayer + PPPType PPPType + HasPPTPHeader bool +} + +// PPPEndpoint is a singleton endpoint for PPP. Since there is no actual +// addressing for the two ends of a PPP connection, we use a singleton value +// named 'point' for each endpoint. +var PPPEndpoint = gopacket.NewEndpoint(EndpointPPP, nil) + +// PPPFlow is a singleton flow for PPP. Since there is no actual addressing for +// the two ends of a PPP connection, we use a singleton value to represent the +// flow for all PPP connections. +var PPPFlow = gopacket.NewFlow(EndpointPPP, nil, nil) + +// LayerType returns LayerTypePPP +func (p *PPP) LayerType() gopacket.LayerType { return LayerTypePPP } + +// LinkFlow returns PPPFlow. 
+func (p *PPP) LinkFlow() gopacket.Flow { return PPPFlow } + +func decodePPP(data []byte, p gopacket.PacketBuilder) error { + ppp := &PPP{} + offset := 0 + if data[0] == 0xff && data[1] == 0x03 { + offset = 2 + ppp.HasPPTPHeader = true + } + if data[offset]&0x1 == 0 { + if data[offset+1]&0x1 == 0 { + return errors.New("PPP has invalid type") + } + ppp.PPPType = PPPType(binary.BigEndian.Uint16(data[offset : offset+2])) + ppp.Contents = data[offset : offset+2] + ppp.Payload = data[offset+2:] + } else { + ppp.PPPType = PPPType(data[offset]) + ppp.Contents = data[offset : offset+1] + ppp.Payload = data[offset+1:] + } + p.AddLayer(ppp) + p.SetLinkLayer(ppp) + return p.NextDecoder(ppp.PPPType) +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (p *PPP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + if p.PPPType&0x100 == 0 { + bytes, err := b.PrependBytes(2) + if err != nil { + return err + } + binary.BigEndian.PutUint16(bytes, uint16(p.PPPType)) + } else { + bytes, err := b.PrependBytes(1) + if err != nil { + return err + } + bytes[0] = uint8(p.PPPType) + } + if p.HasPPTPHeader { + bytes, err := b.PrependBytes(2) + if err != nil { + return err + } + bytes[0] = 0xff + bytes[1] = 0x03 + } + return nil +} diff --git a/vendor/github.com/google/gopacket/layers/pppoe.go b/vendor/github.com/google/gopacket/layers/pppoe.go new file mode 100644 index 0000000000..14cd63a189 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/pppoe.go @@ -0,0 +1,60 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "github.com/google/gopacket" +) + +// PPPoE is the layer for PPPoE encapsulation headers. +type PPPoE struct { + BaseLayer + Version uint8 + Type uint8 + Code PPPoECode + SessionId uint16 + Length uint16 +} + +// LayerType returns gopacket.LayerTypePPPoE. +func (p *PPPoE) LayerType() gopacket.LayerType { + return LayerTypePPPoE +} + +// decodePPPoE decodes the PPPoE header (see http://tools.ietf.org/html/rfc2516). +func decodePPPoE(data []byte, p gopacket.PacketBuilder) error { + pppoe := &PPPoE{ + Version: data[0] >> 4, + Type: data[0] & 0x0F, + Code: PPPoECode(data[1]), + SessionId: binary.BigEndian.Uint16(data[2:4]), + Length: binary.BigEndian.Uint16(data[4:6]), + } + pppoe.BaseLayer = BaseLayer{data[:6], data[6 : 6+pppoe.Length]} + p.AddLayer(pppoe) + return p.NextDecoder(pppoe.Code) +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. 
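Because both layers implement SerializeTo, a PPPoE session frame wrapping a PPP header can be assembled with gopacket's serialization helpers, with FixLengths filling in the PPPoE Length field. A sketch using raw numeric codes rather than the named constants from enums.go (not shown here):

package main

import (
	"fmt"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	// PPPoE session header (code 0x00) carrying a PPP header for IPv4 (0x0021)
	// and a one-byte dummy payload; the numeric codes are illustrative.
	pppoe := &layers.PPPoE{Version: 1, Type: 1, Code: layers.PPPoECode(0x00), SessionId: 0x1234}
	ppp := &layers.PPP{PPPType: layers.PPPType(0x0021)}

	buf := gopacket.NewSerializeBuffer()
	opts := gopacket.SerializeOptions{FixLengths: true}
	if err := gopacket.SerializeLayers(buf, opts, pppoe, ppp, gopacket.Payload([]byte{0x45})); err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", buf.Bytes()) // 6-byte PPPoE header, 2-byte PPP type, payload
}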
+func (p *PPPoE) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + payload := b.Bytes() + bytes, err := b.PrependBytes(6) + if err != nil { + return err + } + bytes[0] = (p.Version << 4) | p.Type + bytes[1] = byte(p.Code) + binary.BigEndian.PutUint16(bytes[2:], p.SessionId) + if opts.FixLengths { + p.Length = uint16(len(payload)) + } + binary.BigEndian.PutUint16(bytes[4:], p.Length) + return nil +} diff --git a/vendor/github.com/google/gopacket/layers/prism.go b/vendor/github.com/google/gopacket/layers/prism.go new file mode 100644 index 0000000000..e1711e7f5b --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/prism.go @@ -0,0 +1,146 @@ +// Copyright 2015 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +// http://www.tcpdump.org/linktypes/LINKTYPE_IEEE802_11_PRISM.html + +package layers + +import ( + "encoding/binary" + "errors" + + "github.com/google/gopacket" +) + +func decodePrismValue(data []byte, pv *PrismValue) { + pv.DID = PrismDID(binary.LittleEndian.Uint32(data[0:4])) + pv.Status = binary.LittleEndian.Uint16(data[4:6]) + pv.Length = binary.LittleEndian.Uint16(data[6:8]) + pv.Data = data[8 : 8+pv.Length] +} + +type PrismDID uint32 + +const ( + PrismDIDType1HostTime PrismDID = 0x10044 + PrismDIDType2HostTime PrismDID = 0x01041 + PrismDIDType1MACTime PrismDID = 0x20044 + PrismDIDType2MACTime PrismDID = 0x02041 + PrismDIDType1Channel PrismDID = 0x30044 + PrismDIDType2Channel PrismDID = 0x03041 + PrismDIDType1RSSI PrismDID = 0x40044 + PrismDIDType2RSSI PrismDID = 0x04041 + PrismDIDType1SignalQuality PrismDID = 0x50044 + PrismDIDType2SignalQuality PrismDID = 0x05041 + PrismDIDType1Signal PrismDID = 0x60044 + PrismDIDType2Signal PrismDID = 0x06041 + PrismDIDType1Noise PrismDID = 0x70044 + PrismDIDType2Noise PrismDID = 0x07041 + PrismDIDType1Rate PrismDID = 0x80044 + PrismDIDType2Rate PrismDID = 0x08041 + PrismDIDType1TransmittedFrameIndicator PrismDID = 0x90044 + PrismDIDType2TransmittedFrameIndicator PrismDID = 0x09041 + PrismDIDType1FrameLength PrismDID = 0xA0044 + PrismDIDType2FrameLength PrismDID = 0x0A041 +) + +const ( + PrismType1MessageCode uint16 = 0x00000044 + PrismType2MessageCode uint16 = 0x00000041 +) + +func (p PrismDID) String() string { + dids := map[PrismDID]string{ + PrismDIDType1HostTime: "Host Time", + PrismDIDType2HostTime: "Host Time", + PrismDIDType1MACTime: "MAC Time", + PrismDIDType2MACTime: "MAC Time", + PrismDIDType1Channel: "Channel", + PrismDIDType2Channel: "Channel", + PrismDIDType1RSSI: "RSSI", + PrismDIDType2RSSI: "RSSI", + PrismDIDType1SignalQuality: "Signal Quality", + PrismDIDType2SignalQuality: "Signal Quality", + PrismDIDType1Signal: "Signal", + PrismDIDType2Signal: "Signal", + PrismDIDType1Noise: "Noise", + PrismDIDType2Noise: "Noise", + PrismDIDType1Rate: "Rate", + PrismDIDType2Rate: "Rate", + PrismDIDType1TransmittedFrameIndicator: "Transmitted Frame Indicator", + PrismDIDType2TransmittedFrameIndicator: "Transmitted Frame Indicator", + PrismDIDType1FrameLength: "Frame Length", + PrismDIDType2FrameLength: "Frame Length", + } + + if str, ok := dids[p]; ok { + return str + } + + return "Unknown DID" +} + +type PrismValue struct { + DID PrismDID + Status uint16 + Length uint16 + Data []byte +} + +func (pv *PrismValue) IsSupplied() bool { + return pv.Status == 1 +} + +var ErrPrismExpectedMoreData = errors.New("Expected more data.") +var ErrPrismInvalidCode = errors.New("Invalid 
header code.") + +func decodePrismHeader(data []byte, p gopacket.PacketBuilder) error { + d := &PrismHeader{} + return decodingLayerDecoder(d, data, p) +} + +type PrismHeader struct { + BaseLayer + Code uint16 + Length uint16 + DeviceName string + Values []PrismValue +} + +func (m *PrismHeader) LayerType() gopacket.LayerType { return LayerTypePrismHeader } + +func (m *PrismHeader) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + m.Code = binary.LittleEndian.Uint16(data[0:4]) + m.Length = binary.LittleEndian.Uint16(data[4:8]) + m.DeviceName = string(data[8:24]) + m.BaseLayer = BaseLayer{Contents: data[:m.Length], Payload: data[m.Length:len(data)]} + + switch m.Code { + case PrismType1MessageCode: + fallthrough + case PrismType2MessageCode: + // valid message code + default: + return ErrPrismInvalidCode + } + + offset := uint16(24) + + m.Values = make([]PrismValue, (m.Length-offset)/12) + for i := 0; i < len(m.Values); i++ { + decodePrismValue(data[offset:offset+12], &m.Values[i]) + offset += 12 + } + + if offset != m.Length { + return ErrPrismExpectedMoreData + } + + return nil +} + +func (m *PrismHeader) CanDecode() gopacket.LayerClass { return LayerTypePrismHeader } +func (m *PrismHeader) NextLayerType() gopacket.LayerType { return LayerTypeDot11 } diff --git a/vendor/github.com/google/gopacket/layers/radiotap.go b/vendor/github.com/google/gopacket/layers/radiotap.go new file mode 100644 index 0000000000..d09559f793 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/radiotap.go @@ -0,0 +1,1076 @@ +// Copyright 2014 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "hash/crc32" + "strings" + + "github.com/google/gopacket" +) + +// align calculates the number of bytes needed to align with the width +// on the offset, returning the number of bytes we need to skip to +// align to the offset (width). 
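+// As an illustration: with the parser at offset 3 and a field that must start
+// on a 2-byte boundary, align(3, 2) returns 1, so one padding byte is skipped
+// and the field is read at offset 4; align(4, 2) returns 0 because the offset
+// is already aligned.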
+func align(offset uint16, width uint16) uint16 { + return ((((offset) + ((width) - 1)) & (^((width) - 1))) - offset) +} + +type RadioTapPresent uint32 + +const ( + RadioTapPresentTSFT RadioTapPresent = 1 << iota + RadioTapPresentFlags + RadioTapPresentRate + RadioTapPresentChannel + RadioTapPresentFHSS + RadioTapPresentDBMAntennaSignal + RadioTapPresentDBMAntennaNoise + RadioTapPresentLockQuality + RadioTapPresentTxAttenuation + RadioTapPresentDBTxAttenuation + RadioTapPresentDBMTxPower + RadioTapPresentAntenna + RadioTapPresentDBAntennaSignal + RadioTapPresentDBAntennaNoise + RadioTapPresentRxFlags + RadioTapPresentTxFlags + RadioTapPresentRtsRetries + RadioTapPresentDataRetries + _ + RadioTapPresentMCS + RadioTapPresentAMPDUStatus + RadioTapPresentVHT + RadioTapPresentEXT RadioTapPresent = 1 << 31 +) + +func (r RadioTapPresent) TSFT() bool { + return r&RadioTapPresentTSFT != 0 +} +func (r RadioTapPresent) Flags() bool { + return r&RadioTapPresentFlags != 0 +} +func (r RadioTapPresent) Rate() bool { + return r&RadioTapPresentRate != 0 +} +func (r RadioTapPresent) Channel() bool { + return r&RadioTapPresentChannel != 0 +} +func (r RadioTapPresent) FHSS() bool { + return r&RadioTapPresentFHSS != 0 +} +func (r RadioTapPresent) DBMAntennaSignal() bool { + return r&RadioTapPresentDBMAntennaSignal != 0 +} +func (r RadioTapPresent) DBMAntennaNoise() bool { + return r&RadioTapPresentDBMAntennaNoise != 0 +} +func (r RadioTapPresent) LockQuality() bool { + return r&RadioTapPresentLockQuality != 0 +} +func (r RadioTapPresent) TxAttenuation() bool { + return r&RadioTapPresentTxAttenuation != 0 +} +func (r RadioTapPresent) DBTxAttenuation() bool { + return r&RadioTapPresentDBTxAttenuation != 0 +} +func (r RadioTapPresent) DBMTxPower() bool { + return r&RadioTapPresentDBMTxPower != 0 +} +func (r RadioTapPresent) Antenna() bool { + return r&RadioTapPresentAntenna != 0 +} +func (r RadioTapPresent) DBAntennaSignal() bool { + return r&RadioTapPresentDBAntennaSignal != 0 +} +func (r RadioTapPresent) DBAntennaNoise() bool { + return r&RadioTapPresentDBAntennaNoise != 0 +} +func (r RadioTapPresent) RxFlags() bool { + return r&RadioTapPresentRxFlags != 0 +} +func (r RadioTapPresent) TxFlags() bool { + return r&RadioTapPresentTxFlags != 0 +} +func (r RadioTapPresent) RtsRetries() bool { + return r&RadioTapPresentRtsRetries != 0 +} +func (r RadioTapPresent) DataRetries() bool { + return r&RadioTapPresentDataRetries != 0 +} +func (r RadioTapPresent) MCS() bool { + return r&RadioTapPresentMCS != 0 +} +func (r RadioTapPresent) AMPDUStatus() bool { + return r&RadioTapPresentAMPDUStatus != 0 +} +func (r RadioTapPresent) VHT() bool { + return r&RadioTapPresentVHT != 0 +} +func (r RadioTapPresent) EXT() bool { + return r&RadioTapPresentEXT != 0 +} + +type RadioTapChannelFlags uint16 + +const ( + RadioTapChannelFlagsTurbo RadioTapChannelFlags = 0x0010 // Turbo channel + RadioTapChannelFlagsCCK RadioTapChannelFlags = 0x0020 // CCK channel + RadioTapChannelFlagsOFDM RadioTapChannelFlags = 0x0040 // OFDM channel + RadioTapChannelFlagsGhz2 RadioTapChannelFlags = 0x0080 // 2 GHz spectrum channel. 
+ RadioTapChannelFlagsGhz5 RadioTapChannelFlags = 0x0100 // 5 GHz spectrum channel + RadioTapChannelFlagsPassive RadioTapChannelFlags = 0x0200 // Only passive scan allowed + RadioTapChannelFlagsDynamic RadioTapChannelFlags = 0x0400 // Dynamic CCK-OFDM channel + RadioTapChannelFlagsGFSK RadioTapChannelFlags = 0x0800 // GFSK channel (FHSS PHY) +) + +func (r RadioTapChannelFlags) Turbo() bool { + return r&RadioTapChannelFlagsTurbo != 0 +} +func (r RadioTapChannelFlags) CCK() bool { + return r&RadioTapChannelFlagsCCK != 0 +} +func (r RadioTapChannelFlags) OFDM() bool { + return r&RadioTapChannelFlagsOFDM != 0 +} +func (r RadioTapChannelFlags) Ghz2() bool { + return r&RadioTapChannelFlagsGhz2 != 0 +} +func (r RadioTapChannelFlags) Ghz5() bool { + return r&RadioTapChannelFlagsGhz5 != 0 +} +func (r RadioTapChannelFlags) Passive() bool { + return r&RadioTapChannelFlagsPassive != 0 +} +func (r RadioTapChannelFlags) Dynamic() bool { + return r&RadioTapChannelFlagsDynamic != 0 +} +func (r RadioTapChannelFlags) GFSK() bool { + return r&RadioTapChannelFlagsGFSK != 0 +} + +// String provides a human readable string for RadioTapChannelFlags. +// This string is possibly subject to change over time; if you're storing this +// persistently, you should probably store the RadioTapChannelFlags value, not its string. +func (a RadioTapChannelFlags) String() string { + var out bytes.Buffer + if a.Turbo() { + out.WriteString("Turbo,") + } + if a.CCK() { + out.WriteString("CCK,") + } + if a.OFDM() { + out.WriteString("OFDM,") + } + if a.Ghz2() { + out.WriteString("Ghz2,") + } + if a.Ghz5() { + out.WriteString("Ghz5,") + } + if a.Passive() { + out.WriteString("Passive,") + } + if a.Dynamic() { + out.WriteString("Dynamic,") + } + if a.GFSK() { + out.WriteString("GFSK,") + } + + if length := out.Len(); length > 0 { + return string(out.Bytes()[:length-1]) // strip final comma + } + return "" +} + +type RadioTapFlags uint8 + +const ( + RadioTapFlagsCFP RadioTapFlags = 1 << iota // sent/received during CFP + RadioTapFlagsShortPreamble // sent/received * with short * preamble + RadioTapFlagsWEP // sent/received * with WEP encryption + RadioTapFlagsFrag // sent/received * with fragmentation + RadioTapFlagsFCS // frame includes FCS + RadioTapFlagsDatapad // frame has padding between * 802.11 header and payload * (to 32-bit boundary) + RadioTapFlagsBadFCS // does not pass FCS check + RadioTapFlagsShortGI // HT short GI +) + +func (r RadioTapFlags) CFP() bool { + return r&RadioTapFlagsCFP != 0 +} +func (r RadioTapFlags) ShortPreamble() bool { + return r&RadioTapFlagsShortPreamble != 0 +} +func (r RadioTapFlags) WEP() bool { + return r&RadioTapFlagsWEP != 0 +} +func (r RadioTapFlags) Frag() bool { + return r&RadioTapFlagsFrag != 0 +} +func (r RadioTapFlags) FCS() bool { + return r&RadioTapFlagsFCS != 0 +} +func (r RadioTapFlags) Datapad() bool { + return r&RadioTapFlagsDatapad != 0 +} +func (r RadioTapFlags) BadFCS() bool { + return r&RadioTapFlagsBadFCS != 0 +} +func (r RadioTapFlags) ShortGI() bool { + return r&RadioTapFlagsShortGI != 0 +} + +// String provides a human readable string for RadioTapFlags. +// This string is possibly subject to change over time; if you're storing this +// persistently, you should probably store the RadioTapFlags value, not its string. 
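+// For example (illustrative): a flags value of 0x12, i.e.
+// RadioTapFlagsShortPreamble|RadioTapFlagsFCS, renders as "SHORT-PREAMBLE,FCS".
+// Note that BadFCS has no branch below and is therefore never included in the
+// string form.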
+func (a RadioTapFlags) String() string { + var out bytes.Buffer + if a.CFP() { + out.WriteString("CFP,") + } + if a.ShortPreamble() { + out.WriteString("SHORT-PREAMBLE,") + } + if a.WEP() { + out.WriteString("WEP,") + } + if a.Frag() { + out.WriteString("FRAG,") + } + if a.FCS() { + out.WriteString("FCS,") + } + if a.Datapad() { + out.WriteString("DATAPAD,") + } + if a.ShortGI() { + out.WriteString("SHORT-GI,") + } + + if length := out.Len(); length > 0 { + return string(out.Bytes()[:length-1]) // strip final comma + } + return "" +} + +type RadioTapRate uint8 + +func (a RadioTapRate) String() string { + return fmt.Sprintf("%v Mb/s", 0.5*float32(a)) +} + +type RadioTapChannelFrequency uint16 + +func (a RadioTapChannelFrequency) String() string { + return fmt.Sprintf("%d MHz", a) +} + +type RadioTapRxFlags uint16 + +const ( + RadioTapRxFlagsBadPlcp RadioTapRxFlags = 0x0002 +) + +func (self RadioTapRxFlags) BadPlcp() bool { + return self&RadioTapRxFlagsBadPlcp != 0 +} + +func (self RadioTapRxFlags) String() string { + if self.BadPlcp() { + return "BADPLCP" + } + return "" +} + +type RadioTapTxFlags uint16 + +const ( + RadioTapTxFlagsFail RadioTapTxFlags = 1 << iota + RadioTapTxFlagsCTS + RadioTapTxFlagsRTS + RadioTapTxFlagsNoACK +) + +func (self RadioTapTxFlags) Fail() bool { return self&RadioTapTxFlagsFail != 0 } +func (self RadioTapTxFlags) CTS() bool { return self&RadioTapTxFlagsCTS != 0 } +func (self RadioTapTxFlags) RTS() bool { return self&RadioTapTxFlagsRTS != 0 } +func (self RadioTapTxFlags) NoACK() bool { return self&RadioTapTxFlagsNoACK != 0 } + +func (self RadioTapTxFlags) String() string { + var tokens []string + if self.Fail() { + tokens = append(tokens, "Fail") + } + if self.CTS() { + tokens = append(tokens, "CTS") + } + if self.RTS() { + tokens = append(tokens, "RTS") + } + if self.NoACK() { + tokens = append(tokens, "NoACK") + } + return strings.Join(tokens, ",") +} + +type RadioTapMCS struct { + Known RadioTapMCSKnown + Flags RadioTapMCSFlags + MCS uint8 +} + +func (self RadioTapMCS) String() string { + var tokens []string + if self.Known.Bandwidth() { + token := "?" 
+ switch self.Flags.Bandwidth() { + case 0: + token = "20" + case 1: + token = "40" + case 2: + token = "40(20L)" + case 3: + token = "40(20U)" + } + tokens = append(tokens, token) + } + if self.Known.MCSIndex() { + tokens = append(tokens, fmt.Sprintf("MCSIndex#%d", self.MCS)) + } + if self.Known.GuardInterval() { + if self.Flags.ShortGI() { + tokens = append(tokens, fmt.Sprintf("shortGI")) + } else { + tokens = append(tokens, fmt.Sprintf("longGI")) + } + } + if self.Known.HTFormat() { + if self.Flags.Greenfield() { + tokens = append(tokens, fmt.Sprintf("HT-greenfield")) + } else { + tokens = append(tokens, fmt.Sprintf("HT-mixed")) + } + } + if self.Known.FECType() { + if self.Flags.FECLDPC() { + tokens = append(tokens, fmt.Sprintf("LDPC")) + } else { + tokens = append(tokens, fmt.Sprintf("BCC")) + } + } + if self.Known.STBC() { + tokens = append(tokens, fmt.Sprintf("STBC#%d", self.Flags.STBC())) + } + if self.Known.NESS() { + num := 0 + if self.Known.NESS1() { + num |= 0x02 + } + if self.Flags.NESS0() { + num |= 0x01 + } + tokens = append(tokens, fmt.Sprintf("num-of-ESS#%d", num)) + } + return strings.Join(tokens, ",") +} + +type RadioTapMCSKnown uint8 + +const ( + RadioTapMCSKnownBandwidth RadioTapMCSKnown = 1 << iota + RadioTapMCSKnownMCSIndex + RadioTapMCSKnownGuardInterval + RadioTapMCSKnownHTFormat + RadioTapMCSKnownFECType + RadioTapMCSKnownSTBC + RadioTapMCSKnownNESS + RadioTapMCSKnownNESS1 +) + +func (self RadioTapMCSKnown) Bandwidth() bool { return self&RadioTapMCSKnownBandwidth != 0 } +func (self RadioTapMCSKnown) MCSIndex() bool { return self&RadioTapMCSKnownMCSIndex != 0 } +func (self RadioTapMCSKnown) GuardInterval() bool { return self&RadioTapMCSKnownGuardInterval != 0 } +func (self RadioTapMCSKnown) HTFormat() bool { return self&RadioTapMCSKnownHTFormat != 0 } +func (self RadioTapMCSKnown) FECType() bool { return self&RadioTapMCSKnownFECType != 0 } +func (self RadioTapMCSKnown) STBC() bool { return self&RadioTapMCSKnownSTBC != 0 } +func (self RadioTapMCSKnown) NESS() bool { return self&RadioTapMCSKnownNESS != 0 } +func (self RadioTapMCSKnown) NESS1() bool { return self&RadioTapMCSKnownNESS1 != 0 } + +type RadioTapMCSFlags uint8 + +const ( + RadioTapMCSFlagsBandwidthMask RadioTapMCSFlags = 0x03 + RadioTapMCSFlagsShortGI = 0x04 + RadioTapMCSFlagsGreenfield = 0x08 + RadioTapMCSFlagsFECLDPC = 0x10 + RadioTapMCSFlagsSTBCMask = 0x60 + RadioTapMCSFlagsNESS0 = 0x80 +) + +func (self RadioTapMCSFlags) Bandwidth() int { + return int(self & RadioTapMCSFlagsBandwidthMask) +} +func (self RadioTapMCSFlags) ShortGI() bool { return self&RadioTapMCSFlagsShortGI != 0 } +func (self RadioTapMCSFlags) Greenfield() bool { return self&RadioTapMCSFlagsGreenfield != 0 } +func (self RadioTapMCSFlags) FECLDPC() bool { return self&RadioTapMCSFlagsFECLDPC != 0 } +func (self RadioTapMCSFlags) STBC() int { + return int(self&RadioTapMCSFlagsSTBCMask) >> 5 +} +func (self RadioTapMCSFlags) NESS0() bool { return self&RadioTapMCSFlagsNESS0 != 0 } + +type RadioTapAMPDUStatus struct { + Reference uint32 + Flags RadioTapAMPDUStatusFlags + CRC uint8 +} + +func (self RadioTapAMPDUStatus) String() string { + tokens := []string{ + fmt.Sprintf("ref#%x", self.Reference), + } + if self.Flags.ReportZerolen() && self.Flags.IsZerolen() { + tokens = append(tokens, fmt.Sprintf("zero-length")) + } + if self.Flags.LastKnown() && self.Flags.IsLast() { + tokens = append(tokens, "last") + } + if self.Flags.DelimCRCErr() { + tokens = append(tokens, "delimiter CRC error") + } + if self.Flags.DelimCRCKnown() { + tokens = 
append(tokens, fmt.Sprintf("delimiter-CRC=%02x", self.CRC)) + } + return strings.Join(tokens, ",") +} + +type RadioTapAMPDUStatusFlags uint16 + +const ( + RadioTapAMPDUStatusFlagsReportZerolen RadioTapAMPDUStatusFlags = 1 << iota + RadioTapAMPDUIsZerolen + RadioTapAMPDULastKnown + RadioTapAMPDUIsLast + RadioTapAMPDUDelimCRCErr + RadioTapAMPDUDelimCRCKnown +) + +func (self RadioTapAMPDUStatusFlags) ReportZerolen() bool { + return self&RadioTapAMPDUStatusFlagsReportZerolen != 0 +} +func (self RadioTapAMPDUStatusFlags) IsZerolen() bool { return self&RadioTapAMPDUIsZerolen != 0 } +func (self RadioTapAMPDUStatusFlags) LastKnown() bool { return self&RadioTapAMPDULastKnown != 0 } +func (self RadioTapAMPDUStatusFlags) IsLast() bool { return self&RadioTapAMPDUIsLast != 0 } +func (self RadioTapAMPDUStatusFlags) DelimCRCErr() bool { return self&RadioTapAMPDUDelimCRCErr != 0 } +func (self RadioTapAMPDUStatusFlags) DelimCRCKnown() bool { + return self&RadioTapAMPDUDelimCRCKnown != 0 +} + +type RadioTapVHT struct { + Known RadioTapVHTKnown + Flags RadioTapVHTFlags + Bandwidth uint8 + MCSNSS [4]RadioTapVHTMCSNSS + Coding uint8 + GroupId uint8 + PartialAID uint16 +} + +func (self RadioTapVHT) String() string { + var tokens []string + if self.Known.STBC() { + if self.Flags.STBC() { + tokens = append(tokens, "STBC") + } else { + tokens = append(tokens, "no STBC") + } + } + if self.Known.TXOPPSNotAllowed() { + if self.Flags.TXOPPSNotAllowed() { + tokens = append(tokens, "TXOP doze not allowed") + } else { + tokens = append(tokens, "TXOP doze allowed") + } + } + if self.Known.GI() { + if self.Flags.SGI() { + tokens = append(tokens, "short GI") + } else { + tokens = append(tokens, "long GI") + } + } + if self.Known.SGINSYMDisambiguation() { + if self.Flags.SGINSYMMod() { + tokens = append(tokens, "NSYM mod 10=9") + } else { + tokens = append(tokens, "NSYM mod 10!=9 or no short GI") + } + } + if self.Known.LDPCExtraOFDMSymbol() { + if self.Flags.LDPCExtraOFDMSymbol() { + tokens = append(tokens, "LDPC extra OFDM symbols") + } else { + tokens = append(tokens, "no LDPC extra OFDM symbols") + } + } + if self.Known.Beamformed() { + if self.Flags.Beamformed() { + tokens = append(tokens, "beamformed") + } else { + tokens = append(tokens, "no beamformed") + } + } + if self.Known.Bandwidth() { + token := "?" + switch self.Bandwidth & 0x1f { + case 0: + token = "20" + case 1: + token = "40" + case 2: + token = "40(20L)" + case 3: + token = "40(20U)" + case 4: + token = "80" + case 5: + token = "80(40L)" + case 6: + token = "80(40U)" + case 7: + token = "80(20LL)" + case 8: + token = "80(20LU)" + case 9: + token = "80(20UL)" + case 10: + token = "80(20UU)" + case 11: + token = "160" + case 12: + token = "160(80L)" + case 13: + token = "160(80U)" + case 14: + token = "160(40LL)" + case 15: + token = "160(40LU)" + case 16: + token = "160(40UL)" + case 17: + token = "160(40UU)" + case 18: + token = "160(20LLL)" + case 19: + token = "160(20LLU)" + case 20: + token = "160(20LUL)" + case 21: + token = "160(20LUU)" + case 22: + token = "160(20ULL)" + case 23: + token = "160(20ULU)" + case 24: + token = "160(20UUL)" + case 25: + token = "160(20UUU)" + } + tokens = append(tokens, token) + } + for i, MCSNSS := range self.MCSNSS { + if MCSNSS.Present() { + fec := "?" 
+ switch self.Coding & (1 << uint8(i)) { + case 0: + fec = "BCC" + case 1: + fec = "LDPC" + } + tokens = append(tokens, fmt.Sprintf("user%d(%s,%s)", i, MCSNSS.String(), fec)) + } + } + if self.Known.GroupId() { + tokens = append(tokens, + fmt.Sprintf("group=%d", self.GroupId)) + } + if self.Known.PartialAID() { + tokens = append(tokens, + fmt.Sprintf("partial-AID=%d", self.PartialAID)) + } + return strings.Join(tokens, ",") +} + +type RadioTapVHTKnown uint16 + +const ( + RadioTapVHTKnownSTBC RadioTapVHTKnown = 1 << iota + RadioTapVHTKnownTXOPPSNotAllowed + RadioTapVHTKnownGI + RadioTapVHTKnownSGINSYMDisambiguation + RadioTapVHTKnownLDPCExtraOFDMSymbol + RadioTapVHTKnownBeamformed + RadioTapVHTKnownBandwidth + RadioTapVHTKnownGroupId + RadioTapVHTKnownPartialAID +) + +func (self RadioTapVHTKnown) STBC() bool { return self&RadioTapVHTKnownSTBC != 0 } +func (self RadioTapVHTKnown) TXOPPSNotAllowed() bool { + return self&RadioTapVHTKnownTXOPPSNotAllowed != 0 +} +func (self RadioTapVHTKnown) GI() bool { return self&RadioTapVHTKnownGI != 0 } +func (self RadioTapVHTKnown) SGINSYMDisambiguation() bool { + return self&RadioTapVHTKnownSGINSYMDisambiguation != 0 +} +func (self RadioTapVHTKnown) LDPCExtraOFDMSymbol() bool { + return self&RadioTapVHTKnownLDPCExtraOFDMSymbol != 0 +} +func (self RadioTapVHTKnown) Beamformed() bool { return self&RadioTapVHTKnownBeamformed != 0 } +func (self RadioTapVHTKnown) Bandwidth() bool { return self&RadioTapVHTKnownBandwidth != 0 } +func (self RadioTapVHTKnown) GroupId() bool { return self&RadioTapVHTKnownGroupId != 0 } +func (self RadioTapVHTKnown) PartialAID() bool { return self&RadioTapVHTKnownPartialAID != 0 } + +type RadioTapVHTFlags uint8 + +const ( + RadioTapVHTFlagsSTBC RadioTapVHTFlags = 1 << iota + RadioTapVHTFlagsTXOPPSNotAllowed + RadioTapVHTFlagsSGI + RadioTapVHTFlagsSGINSYMMod + RadioTapVHTFlagsLDPCExtraOFDMSymbol + RadioTapVHTFlagsBeamformed +) + +func (self RadioTapVHTFlags) STBC() bool { return self&RadioTapVHTFlagsSTBC != 0 } +func (self RadioTapVHTFlags) TXOPPSNotAllowed() bool { + return self&RadioTapVHTFlagsTXOPPSNotAllowed != 0 +} +func (self RadioTapVHTFlags) SGI() bool { return self&RadioTapVHTFlagsSGI != 0 } +func (self RadioTapVHTFlags) SGINSYMMod() bool { return self&RadioTapVHTFlagsSGINSYMMod != 0 } +func (self RadioTapVHTFlags) LDPCExtraOFDMSymbol() bool { + return self&RadioTapVHTFlagsLDPCExtraOFDMSymbol != 0 +} +func (self RadioTapVHTFlags) Beamformed() bool { return self&RadioTapVHTFlagsBeamformed != 0 } + +type RadioTapVHTMCSNSS uint8 + +func (self RadioTapVHTMCSNSS) Present() bool { + return self&0x0F != 0 +} + +func (self RadioTapVHTMCSNSS) String() string { + return fmt.Sprintf("NSS#%dMCS#%d", uint32(self&0xf), uint32(self>>4)) +} + +func decodeRadioTap(data []byte, p gopacket.PacketBuilder) error { + d := &RadioTap{} + // TODO: Should we set LinkLayer here? And implement LinkFlow + return decodingLayerDecoder(d, data, p) +} + +type RadioTap struct { + BaseLayer + + // Version 0. Only increases for drastic changes, introduction of compatible new fields does not count. + Version uint8 + // Length of the whole header in bytes, including it_version, it_pad, it_len, and data fields. + Length uint16 + // Present is a bitmap telling which fields are present. Set bit 31 (0x80000000) to extend the bitmap by another 32 bits. Additional extensions are made by setting bit 31. 
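+	// For instance (illustrative): a Present value of 0x00000007 means the TSFT,
+	// Flags and Rate fields follow, in that order.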
+ Present RadioTapPresent + // TSFT: value in microseconds of the MAC's 64-bit 802.11 Time Synchronization Function timer when the first bit of the MPDU arrived at the MAC. For received frames, only. + TSFT uint64 + Flags RadioTapFlags + // Rate Tx/Rx data rate + Rate RadioTapRate + // ChannelFrequency Tx/Rx frequency in MHz, followed by flags + ChannelFrequency RadioTapChannelFrequency + ChannelFlags RadioTapChannelFlags + // FHSS For frequency-hopping radios, the hop set (first byte) and pattern (second byte). + FHSS uint16 + // DBMAntennaSignal RF signal power at the antenna, decibel difference from one milliwatt. + DBMAntennaSignal int8 + // DBMAntennaNoise RF noise power at the antenna, decibel difference from one milliwatt. + DBMAntennaNoise int8 + // LockQuality Quality of Barker code lock. Unitless. Monotonically nondecreasing with "better" lock strength. Called "Signal Quality" in datasheets. + LockQuality uint16 + // TxAttenuation Transmit power expressed as unitless distance from max power set at factory calibration. 0 is max power. Monotonically nondecreasing with lower power levels. + TxAttenuation uint16 + // DBTxAttenuation Transmit power expressed as decibel distance from max power set at factory calibration. 0 is max power. Monotonically nondecreasing with lower power levels. + DBTxAttenuation uint16 + // DBMTxPower Transmit power expressed as dBm (decibels from a 1 milliwatt reference). This is the absolute power level measured at the antenna port. + DBMTxPower int8 + // Antenna Unitless indication of the Rx/Tx antenna for this packet. The first antenna is antenna 0. + Antenna uint8 + // DBAntennaSignal RF signal power at the antenna, decibel difference from an arbitrary, fixed reference. + DBAntennaSignal uint8 + // DBAntennaNoise RF noise power at the antenna, decibel difference from an arbitrary, fixed reference point. + DBAntennaNoise uint8 + // + RxFlags RadioTapRxFlags + TxFlags RadioTapTxFlags + RtsRetries uint8 + DataRetries uint8 + MCS RadioTapMCS + AMPDUStatus RadioTapAMPDUStatus + VHT RadioTapVHT +} + +func (m *RadioTap) LayerType() gopacket.LayerType { return LayerTypeRadioTap } + +func (m *RadioTap) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 8 { + df.SetTruncated() + return errors.New("RadioTap too small") + } + m.Version = uint8(data[0]) + m.Length = binary.LittleEndian.Uint16(data[2:4]) + m.Present = RadioTapPresent(binary.LittleEndian.Uint32(data[4:8])) + + offset := uint16(4) + + for (binary.LittleEndian.Uint32(data[offset:offset+4]) & 0x80000000) != 0 { + // This parser only handles standard radiotap namespace, + // and expects all fields are packed in the first it_present. + // Extended bitmap will be just ignored. 
+ offset += 4 + } + offset += 4 // skip the bitmap + + if m.Present.TSFT() { + offset += align(offset, 8) + m.TSFT = binary.LittleEndian.Uint64(data[offset : offset+8]) + offset += 8 + } + if m.Present.Flags() { + m.Flags = RadioTapFlags(data[offset]) + offset++ + } + if m.Present.Rate() { + m.Rate = RadioTapRate(data[offset]) + offset++ + } + if m.Present.Channel() { + offset += align(offset, 2) + m.ChannelFrequency = RadioTapChannelFrequency(binary.LittleEndian.Uint16(data[offset : offset+2])) + offset += 2 + m.ChannelFlags = RadioTapChannelFlags(binary.LittleEndian.Uint16(data[offset : offset+2])) + offset += 2 + } + if m.Present.FHSS() { + m.FHSS = binary.LittleEndian.Uint16(data[offset : offset+2]) + offset += 2 + } + if m.Present.DBMAntennaSignal() { + m.DBMAntennaSignal = int8(data[offset]) + offset++ + } + if m.Present.DBMAntennaNoise() { + m.DBMAntennaNoise = int8(data[offset]) + offset++ + } + if m.Present.LockQuality() { + offset += align(offset, 2) + m.LockQuality = binary.LittleEndian.Uint16(data[offset : offset+2]) + offset += 2 + } + if m.Present.TxAttenuation() { + offset += align(offset, 2) + m.TxAttenuation = binary.LittleEndian.Uint16(data[offset : offset+2]) + offset += 2 + } + if m.Present.DBTxAttenuation() { + offset += align(offset, 2) + m.DBTxAttenuation = binary.LittleEndian.Uint16(data[offset : offset+2]) + offset += 2 + } + if m.Present.DBMTxPower() { + m.DBMTxPower = int8(data[offset]) + offset++ + } + if m.Present.Antenna() { + m.Antenna = uint8(data[offset]) + offset++ + } + if m.Present.DBAntennaSignal() { + m.DBAntennaSignal = uint8(data[offset]) + offset++ + } + if m.Present.DBAntennaNoise() { + m.DBAntennaNoise = uint8(data[offset]) + offset++ + } + if m.Present.RxFlags() { + offset += align(offset, 2) + m.RxFlags = RadioTapRxFlags(binary.LittleEndian.Uint16(data[offset:])) + offset += 2 + } + if m.Present.TxFlags() { + offset += align(offset, 2) + m.TxFlags = RadioTapTxFlags(binary.LittleEndian.Uint16(data[offset:])) + offset += 2 + } + if m.Present.RtsRetries() { + m.RtsRetries = uint8(data[offset]) + offset++ + } + if m.Present.DataRetries() { + m.DataRetries = uint8(data[offset]) + offset++ + } + if m.Present.MCS() { + m.MCS = RadioTapMCS{ + RadioTapMCSKnown(data[offset]), + RadioTapMCSFlags(data[offset+1]), + uint8(data[offset+2]), + } + offset += 3 + } + if m.Present.AMPDUStatus() { + offset += align(offset, 4) + m.AMPDUStatus = RadioTapAMPDUStatus{ + Reference: binary.LittleEndian.Uint32(data[offset:]), + Flags: RadioTapAMPDUStatusFlags(binary.LittleEndian.Uint16(data[offset+4:])), + CRC: uint8(data[offset+6]), + } + offset += 8 + } + if m.Present.VHT() { + offset += align(offset, 2) + m.VHT = RadioTapVHT{ + Known: RadioTapVHTKnown(binary.LittleEndian.Uint16(data[offset:])), + Flags: RadioTapVHTFlags(data[offset+2]), + Bandwidth: uint8(data[offset+3]), + MCSNSS: [4]RadioTapVHTMCSNSS{ + RadioTapVHTMCSNSS(data[offset+4]), + RadioTapVHTMCSNSS(data[offset+5]), + RadioTapVHTMCSNSS(data[offset+6]), + RadioTapVHTMCSNSS(data[offset+7]), + }, + Coding: uint8(data[offset+8]), + GroupId: uint8(data[offset+9]), + PartialAID: binary.LittleEndian.Uint16(data[offset+10:]), + } + offset += 12 + } + + payload := data[m.Length:] + + // Remove non standard padding used by some Wi-Fi drivers + if m.Flags.Datapad() && + payload[0]&0xC == 0x8 { //&& // Data frame + headlen := 24 + if payload[0]&0x8C == 0x88 { // QoS + headlen += 2 + } + if payload[1]&0x3 == 0x3 { // 4 addresses + headlen += 2 + } + if headlen%4 == 2 { + payload = append(payload[:headlen], 
payload[headlen+2:len(payload)]...)
+		}
+	}
+
+	if !m.Flags.FCS() {
+		// Dot11.DecodeFromBytes() expects FCS present and performs a hard chop on the checksum
+		// If a user is handing in subslices or packets from a buffered stream, the capacity of the slice
+		// may extend beyond the len, rather than expecting callers to enforce cap==len on every packet
+		// we take the hit in this one case and do a reallocation. If the user DOES enforce cap==len
+		// then the reallocation will happen anyway on the append. This is required because the append
+		// writes to the memory directly after the payload if there is sufficient capacity, which callers
+		// may not expect.
+		reallocPayload := make([]byte, len(payload)+4)
+		copy(reallocPayload[0:len(payload)], payload)
+		h := crc32.NewIEEE()
+		h.Write(payload)
+		binary.LittleEndian.PutUint32(reallocPayload[len(payload):], h.Sum32())
+		payload = reallocPayload
+	}
+	m.BaseLayer = BaseLayer{Contents: data[:m.Length], Payload: payload}
+
+	return nil
+}
+
+func (m RadioTap) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	buf := make([]byte, 1024)
+
+	buf[0] = m.Version
+	buf[1] = 0
+
+	binary.LittleEndian.PutUint32(buf[4:8], uint32(m.Present))
+
+	offset := uint16(4)
+
+	for (binary.LittleEndian.Uint32(buf[offset:offset+4]) & 0x80000000) != 0 {
+		offset += 4
+	}
+
+	offset += 4
+
+	if m.Present.TSFT() {
+		offset += align(offset, 8)
+		binary.LittleEndian.PutUint64(buf[offset:offset+8], m.TSFT)
+		offset += 8
+	}
+
+	if m.Present.Flags() {
+		buf[offset] = uint8(m.Flags)
+		offset++
+	}
+
+	if m.Present.Rate() {
+		buf[offset] = uint8(m.Rate)
+		offset++
+	}
+
+	if m.Present.Channel() {
+		offset += align(offset, 2)
+		binary.LittleEndian.PutUint16(buf[offset:offset+2], uint16(m.ChannelFrequency))
+		offset += 2
+		binary.LittleEndian.PutUint16(buf[offset:offset+2], uint16(m.ChannelFlags))
+		offset += 2
+	}
+
+	if m.Present.FHSS() {
+		binary.LittleEndian.PutUint16(buf[offset:offset+2], m.FHSS)
+		offset += 2
+	}
+
+	if m.Present.DBMAntennaSignal() {
+		buf[offset] = byte(m.DBMAntennaSignal)
+		offset++
+	}
+
+	if m.Present.DBMAntennaNoise() {
+		buf[offset] = byte(m.DBMAntennaNoise)
+		offset++
+	}
+
+	if m.Present.LockQuality() {
+		offset += align(offset, 2)
+		binary.LittleEndian.PutUint16(buf[offset:offset+2], m.LockQuality)
+		offset += 2
+	}
+
+	if m.Present.TxAttenuation() {
+		offset += align(offset, 2)
+		binary.LittleEndian.PutUint16(buf[offset:offset+2], m.TxAttenuation)
+		offset += 2
+	}
+
+	if m.Present.DBTxAttenuation() {
+		offset += align(offset, 2)
+		binary.LittleEndian.PutUint16(buf[offset:offset+2], m.DBTxAttenuation)
+		offset += 2
+	}
+
+	if m.Present.DBMTxPower() {
+		buf[offset] = byte(m.DBMTxPower)
+		offset++
+	}
+
+	if m.Present.Antenna() {
+		buf[offset] = uint8(m.Antenna)
+		offset++
+	}
+
+	if m.Present.DBAntennaSignal() {
+		buf[offset] = uint8(m.DBAntennaSignal)
+		offset++
+	}
+
+	if m.Present.DBAntennaNoise() {
+		buf[offset] = uint8(m.DBAntennaNoise)
+		offset++
+	}
+
+	if m.Present.RxFlags() {
+		offset += align(offset, 2)
+		binary.LittleEndian.PutUint16(buf[offset:offset+2], uint16(m.RxFlags))
+		offset += 2
+	}
+
+	if m.Present.TxFlags() {
+		offset += align(offset, 2)
+		binary.LittleEndian.PutUint16(buf[offset:offset+2], uint16(m.TxFlags))
+		offset += 2
+	}
+
+	if m.Present.RtsRetries() {
+		buf[offset] = m.RtsRetries
+		offset++
+	}
+
+	if m.Present.DataRetries() {
+		buf[offset] = m.DataRetries
+		offset++
+	}
+
+	if m.Present.MCS() {
+		buf[offset] = uint8(m.MCS.Known)
+		buf[offset+1] = uint8(m.MCS.Flags)
+		buf[offset+2]
= uint8(m.MCS.MCS) + + offset += 3 + } + + if m.Present.AMPDUStatus() { + offset += align(offset, 4) + + binary.LittleEndian.PutUint32(buf[offset:offset+4], m.AMPDUStatus.Reference) + binary.LittleEndian.PutUint16(buf[offset+4:offset+6], uint16(m.AMPDUStatus.Flags)) + + buf[offset+6] = m.AMPDUStatus.CRC + + offset += 8 + } + + if m.Present.VHT() { + offset += align(offset, 2) + + binary.LittleEndian.PutUint16(buf[offset:], uint16(m.VHT.Known)) + + buf[offset+2] = uint8(m.VHT.Flags) + buf[offset+3] = uint8(m.VHT.Bandwidth) + buf[offset+4] = uint8(m.VHT.MCSNSS[0]) + buf[offset+5] = uint8(m.VHT.MCSNSS[1]) + buf[offset+6] = uint8(m.VHT.MCSNSS[2]) + buf[offset+7] = uint8(m.VHT.MCSNSS[3]) + buf[offset+8] = uint8(m.VHT.Coding) + buf[offset+9] = uint8(m.VHT.GroupId) + + binary.LittleEndian.PutUint16(buf[offset+10:offset+12], m.VHT.PartialAID) + + offset += 12 + } + + packetBuf, err := b.PrependBytes(int(offset)) + + if err != nil { + return err + } + + if opts.FixLengths { + m.Length = offset + } + + binary.LittleEndian.PutUint16(buf[2:4], m.Length) + + copy(packetBuf, buf) + + return nil +} + +func (m *RadioTap) CanDecode() gopacket.LayerClass { return LayerTypeRadioTap } +func (m *RadioTap) NextLayerType() gopacket.LayerType { return LayerTypeDot11 } diff --git a/vendor/github.com/google/gopacket/layers/radius.go b/vendor/github.com/google/gopacket/layers/radius.go new file mode 100644 index 0000000000..c43ea29451 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/radius.go @@ -0,0 +1,560 @@ +// Copyright 2020 The GoPacket Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file in the root of the source tree. + +package layers + +import ( + "encoding/binary" + "fmt" + + "github.com/google/gopacket" +) + +const ( + // RFC 2865 3. Packet Format + // `The minimum length is 20 and maximum length is 4096.` + radiusMinimumRecordSizeInBytes int = 20 + radiusMaximumRecordSizeInBytes int = 4096 + + // RFC 2865 5. Attributes + // `The Length field is one octet, and indicates the length of this Attribute including the Type, Length and Value fields.` + // `The Value field is zero or more octets and contains information specific to the Attribute.` + radiusAttributesMinimumRecordSizeInBytes int = 2 +) + +// RADIUS represents a Remote Authentication Dial In User Service layer. +type RADIUS struct { + BaseLayer + + Code RADIUSCode + Identifier RADIUSIdentifier + Length RADIUSLength + Authenticator RADIUSAuthenticator + Attributes []RADIUSAttribute +} + +// RADIUSCode represents packet type. +type RADIUSCode uint8 + +// constants that define RADIUSCode. +const ( + RADIUSCodeAccessRequest RADIUSCode = 1 // RFC2865 3. Packet Format + RADIUSCodeAccessAccept RADIUSCode = 2 // RFC2865 3. Packet Format + RADIUSCodeAccessReject RADIUSCode = 3 // RFC2865 3. Packet Format + RADIUSCodeAccountingRequest RADIUSCode = 4 // RFC2865 3. Packet Format + RADIUSCodeAccountingResponse RADIUSCode = 5 // RFC2865 3. Packet Format + RADIUSCodeAccessChallenge RADIUSCode = 11 // RFC2865 3. Packet Format + RADIUSCodeStatusServer RADIUSCode = 12 // RFC2865 3. Packet Format (experimental) + RADIUSCodeStatusClient RADIUSCode = 13 // RFC2865 3. Packet Format (experimental) + RADIUSCodeReserved RADIUSCode = 255 // RFC2865 3. Packet Format +) + +// String returns a string version of a RADIUSCode. 
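+// For example (illustrative): RADIUSCodeAccessRequest.String() yields
+// "Access-Request", while an unassigned value such as RADIUSCode(200) yields
+// "Unknown(200)".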
+func (t RADIUSCode) String() (s string) { + switch t { + case RADIUSCodeAccessRequest: + s = "Access-Request" + case RADIUSCodeAccessAccept: + s = "Access-Accept" + case RADIUSCodeAccessReject: + s = "Access-Reject" + case RADIUSCodeAccountingRequest: + s = "Accounting-Request" + case RADIUSCodeAccountingResponse: + s = "Accounting-Response" + case RADIUSCodeAccessChallenge: + s = "Access-Challenge" + case RADIUSCodeStatusServer: + s = "Status-Server" + case RADIUSCodeStatusClient: + s = "Status-Client" + case RADIUSCodeReserved: + s = "Reserved" + default: + s = fmt.Sprintf("Unknown(%d)", t) + } + return +} + +// RADIUSIdentifier represents packet identifier. +type RADIUSIdentifier uint8 + +// RADIUSLength represents packet length. +type RADIUSLength uint16 + +// RADIUSAuthenticator represents authenticator. +type RADIUSAuthenticator [16]byte + +// RADIUSAttribute represents attributes. +type RADIUSAttribute struct { + Type RADIUSAttributeType + Length RADIUSAttributeLength + Value RADIUSAttributeValue +} + +// RADIUSAttributeType represents attribute type. +type RADIUSAttributeType uint8 + +// constants that define RADIUSAttributeType. +const ( + RADIUSAttributeTypeUserName RADIUSAttributeType = 1 // RFC2865 5.1. User-Name + RADIUSAttributeTypeUserPassword RADIUSAttributeType = 2 // RFC2865 5.2. User-Password + RADIUSAttributeTypeCHAPPassword RADIUSAttributeType = 3 // RFC2865 5.3. CHAP-Password + RADIUSAttributeTypeNASIPAddress RADIUSAttributeType = 4 // RFC2865 5.4. NAS-IP-Address + RADIUSAttributeTypeNASPort RADIUSAttributeType = 5 // RFC2865 5.5. NAS-Port + RADIUSAttributeTypeServiceType RADIUSAttributeType = 6 // RFC2865 5.6. Service-Type + RADIUSAttributeTypeFramedProtocol RADIUSAttributeType = 7 // RFC2865 5.7. Framed-Protocol + RADIUSAttributeTypeFramedIPAddress RADIUSAttributeType = 8 // RFC2865 5.8. Framed-IP-Address + RADIUSAttributeTypeFramedIPNetmask RADIUSAttributeType = 9 // RFC2865 5.9. Framed-IP-Netmask + RADIUSAttributeTypeFramedRouting RADIUSAttributeType = 10 // RFC2865 5.10. Framed-Routing + RADIUSAttributeTypeFilterId RADIUSAttributeType = 11 // RFC2865 5.11. Filter-Id + RADIUSAttributeTypeFramedMTU RADIUSAttributeType = 12 // RFC2865 5.12. Framed-MTU + RADIUSAttributeTypeFramedCompression RADIUSAttributeType = 13 // RFC2865 5.13. Framed-Compression + RADIUSAttributeTypeLoginIPHost RADIUSAttributeType = 14 // RFC2865 5.14. Login-IP-Host + RADIUSAttributeTypeLoginService RADIUSAttributeType = 15 // RFC2865 5.15. Login-Service + RADIUSAttributeTypeLoginTCPPort RADIUSAttributeType = 16 // RFC2865 5.16. Login-TCP-Port + RADIUSAttributeTypeReplyMessage RADIUSAttributeType = 18 // RFC2865 5.18. Reply-Message + RADIUSAttributeTypeCallbackNumber RADIUSAttributeType = 19 // RFC2865 5.19. Callback-Number + RADIUSAttributeTypeCallbackId RADIUSAttributeType = 20 // RFC2865 5.20. Callback-Id + RADIUSAttributeTypeFramedRoute RADIUSAttributeType = 22 // RFC2865 5.22. Framed-Route + RADIUSAttributeTypeFramedIPXNetwork RADIUSAttributeType = 23 // RFC2865 5.23. Framed-IPX-Network + RADIUSAttributeTypeState RADIUSAttributeType = 24 // RFC2865 5.24. State + RADIUSAttributeTypeClass RADIUSAttributeType = 25 // RFC2865 5.25. Class + RADIUSAttributeTypeVendorSpecific RADIUSAttributeType = 26 // RFC2865 5.26. Vendor-Specific + RADIUSAttributeTypeSessionTimeout RADIUSAttributeType = 27 // RFC2865 5.27. Session-Timeout + RADIUSAttributeTypeIdleTimeout RADIUSAttributeType = 28 // RFC2865 5.28. Idle-Timeout + RADIUSAttributeTypeTerminationAction RADIUSAttributeType = 29 // RFC2865 5.29. 
Termination-Action + RADIUSAttributeTypeCalledStationId RADIUSAttributeType = 30 // RFC2865 5.30. Called-Station-Id + RADIUSAttributeTypeCallingStationId RADIUSAttributeType = 31 // RFC2865 5.31. Calling-Station-Id + RADIUSAttributeTypeNASIdentifier RADIUSAttributeType = 32 // RFC2865 5.32. NAS-Identifier + RADIUSAttributeTypeProxyState RADIUSAttributeType = 33 // RFC2865 5.33. Proxy-State + RADIUSAttributeTypeLoginLATService RADIUSAttributeType = 34 // RFC2865 5.34. Login-LAT-Service + RADIUSAttributeTypeLoginLATNode RADIUSAttributeType = 35 // RFC2865 5.35. Login-LAT-Node + RADIUSAttributeTypeLoginLATGroup RADIUSAttributeType = 36 // RFC2865 5.36. Login-LAT-Group + RADIUSAttributeTypeFramedAppleTalkLink RADIUSAttributeType = 37 // RFC2865 5.37. Framed-AppleTalk-Link + RADIUSAttributeTypeFramedAppleTalkNetwork RADIUSAttributeType = 38 // RFC2865 5.38. Framed-AppleTalk-Network + RADIUSAttributeTypeFramedAppleTalkZone RADIUSAttributeType = 39 // RFC2865 5.39. Framed-AppleTalk-Zone + RADIUSAttributeTypeAcctStatusType RADIUSAttributeType = 40 // RFC2866 5.1. Acct-Status-Type + RADIUSAttributeTypeAcctDelayTime RADIUSAttributeType = 41 // RFC2866 5.2. Acct-Delay-Time + RADIUSAttributeTypeAcctInputOctets RADIUSAttributeType = 42 // RFC2866 5.3. Acct-Input-Octets + RADIUSAttributeTypeAcctOutputOctets RADIUSAttributeType = 43 // RFC2866 5.4. Acct-Output-Octets + RADIUSAttributeTypeAcctSessionId RADIUSAttributeType = 44 // RFC2866 5.5. Acct-Session-Id + RADIUSAttributeTypeAcctAuthentic RADIUSAttributeType = 45 // RFC2866 5.6. Acct-Authentic + RADIUSAttributeTypeAcctSessionTime RADIUSAttributeType = 46 // RFC2866 5.7. Acct-Session-Time + RADIUSAttributeTypeAcctInputPackets RADIUSAttributeType = 47 // RFC2866 5.8. Acct-Input-Packets + RADIUSAttributeTypeAcctOutputPackets RADIUSAttributeType = 48 // RFC2866 5.9. Acct-Output-Packets + RADIUSAttributeTypeAcctTerminateCause RADIUSAttributeType = 49 // RFC2866 5.10. Acct-Terminate-Cause + RADIUSAttributeTypeAcctMultiSessionId RADIUSAttributeType = 50 // RFC2866 5.11. Acct-Multi-Session-Id + RADIUSAttributeTypeAcctLinkCount RADIUSAttributeType = 51 // RFC2866 5.12. Acct-Link-Count + RADIUSAttributeTypeAcctInputGigawords RADIUSAttributeType = 52 // RFC2869 5.1. Acct-Input-Gigawords + RADIUSAttributeTypeAcctOutputGigawords RADIUSAttributeType = 53 // RFC2869 5.2. Acct-Output-Gigawords + RADIUSAttributeTypeEventTimestamp RADIUSAttributeType = 55 // RFC2869 5.3. Event-Timestamp + RADIUSAttributeTypeCHAPChallenge RADIUSAttributeType = 60 // RFC2865 5.40. CHAP-Challenge + RADIUSAttributeTypeNASPortType RADIUSAttributeType = 61 // RFC2865 5.41. NAS-Port-Type + RADIUSAttributeTypePortLimit RADIUSAttributeType = 62 // RFC2865 5.42. Port-Limit + RADIUSAttributeTypeLoginLATPort RADIUSAttributeType = 63 // RFC2865 5.43. Login-LAT-Port + RADIUSAttributeTypeTunnelType RADIUSAttributeType = 64 // RFC2868 3.1. Tunnel-Type + RADIUSAttributeTypeTunnelMediumType RADIUSAttributeType = 65 // RFC2868 3.2. Tunnel-Medium-Type + RADIUSAttributeTypeTunnelClientEndpoint RADIUSAttributeType = 66 // RFC2868 3.3. Tunnel-Client-Endpoint + RADIUSAttributeTypeTunnelServerEndpoint RADIUSAttributeType = 67 // RFC2868 3.4. Tunnel-Server-Endpoint + RADIUSAttributeTypeAcctTunnelConnection RADIUSAttributeType = 68 // RFC2867 4.1. Acct-Tunnel-Connection + RADIUSAttributeTypeTunnelPassword RADIUSAttributeType = 69 // RFC2868 3.5. Tunnel-Password + RADIUSAttributeTypeARAPPassword RADIUSAttributeType = 70 // RFC2869 5.4. 
ARAP-Password + RADIUSAttributeTypeARAPFeatures RADIUSAttributeType = 71 // RFC2869 5.5. ARAP-Features + RADIUSAttributeTypeARAPZoneAccess RADIUSAttributeType = 72 // RFC2869 5.6. ARAP-Zone-Access + RADIUSAttributeTypeARAPSecurity RADIUSAttributeType = 73 // RFC2869 5.7. ARAP-Security + RADIUSAttributeTypeARAPSecurityData RADIUSAttributeType = 74 // RFC2869 5.8. ARAP-Security-Data + RADIUSAttributeTypePasswordRetry RADIUSAttributeType = 75 // RFC2869 5.9. Password-Retry + RADIUSAttributeTypePrompt RADIUSAttributeType = 76 // RFC2869 5.10. Prompt + RADIUSAttributeTypeConnectInfo RADIUSAttributeType = 77 // RFC2869 5.11. Connect-Info + RADIUSAttributeTypeConfigurationToken RADIUSAttributeType = 78 // RFC2869 5.12. Configuration-Token + RADIUSAttributeTypeEAPMessage RADIUSAttributeType = 79 // RFC2869 5.13. EAP-Message + RADIUSAttributeTypeMessageAuthenticator RADIUSAttributeType = 80 // RFC2869 5.14. Message-Authenticator + RADIUSAttributeTypeTunnelPrivateGroupID RADIUSAttributeType = 81 // RFC2868 3.6. Tunnel-Private-Group-ID + RADIUSAttributeTypeTunnelAssignmentID RADIUSAttributeType = 82 // RFC2868 3.7. Tunnel-Assignment-ID + RADIUSAttributeTypeTunnelPreference RADIUSAttributeType = 83 // RFC2868 3.8. Tunnel-Preference + RADIUSAttributeTypeARAPChallengeResponse RADIUSAttributeType = 84 // RFC2869 5.15. ARAP-Challenge-Response + RADIUSAttributeTypeAcctInterimInterval RADIUSAttributeType = 85 // RFC2869 5.16. Acct-Interim-Interval + RADIUSAttributeTypeAcctTunnelPacketsLost RADIUSAttributeType = 86 // RFC2867 4.2. Acct-Tunnel-Packets-Lost + RADIUSAttributeTypeNASPortId RADIUSAttributeType = 87 // RFC2869 5.17. NAS-Port-Id + RADIUSAttributeTypeFramedPool RADIUSAttributeType = 88 // RFC2869 5.18. Framed-Pool + RADIUSAttributeTypeTunnelClientAuthID RADIUSAttributeType = 90 // RFC2868 3.9. Tunnel-Client-Auth-ID + RADIUSAttributeTypeTunnelServerAuthID RADIUSAttributeType = 91 // RFC2868 3.10. Tunnel-Server-Auth-ID +) + +// RADIUSAttributeType represents attribute length. +type RADIUSAttributeLength uint8 + +// RADIUSAttributeType represents attribute value. +type RADIUSAttributeValue []byte + +// String returns a string version of a RADIUSAttributeType. 
+func (t RADIUSAttributeType) String() (s string) { + switch t { + case RADIUSAttributeTypeUserName: + s = "User-Name" + case RADIUSAttributeTypeUserPassword: + s = "User-Password" + case RADIUSAttributeTypeCHAPPassword: + s = "CHAP-Password" + case RADIUSAttributeTypeNASIPAddress: + s = "NAS-IP-Address" + case RADIUSAttributeTypeNASPort: + s = "NAS-Port" + case RADIUSAttributeTypeServiceType: + s = "Service-Type" + case RADIUSAttributeTypeFramedProtocol: + s = "Framed-Protocol" + case RADIUSAttributeTypeFramedIPAddress: + s = "Framed-IP-Address" + case RADIUSAttributeTypeFramedIPNetmask: + s = "Framed-IP-Netmask" + case RADIUSAttributeTypeFramedRouting: + s = "Framed-Routing" + case RADIUSAttributeTypeFilterId: + s = "Filter-Id" + case RADIUSAttributeTypeFramedMTU: + s = "Framed-MTU" + case RADIUSAttributeTypeFramedCompression: + s = "Framed-Compression" + case RADIUSAttributeTypeLoginIPHost: + s = "Login-IP-Host" + case RADIUSAttributeTypeLoginService: + s = "Login-Service" + case RADIUSAttributeTypeLoginTCPPort: + s = "Login-TCP-Port" + case RADIUSAttributeTypeReplyMessage: + s = "Reply-Message" + case RADIUSAttributeTypeCallbackNumber: + s = "Callback-Number" + case RADIUSAttributeTypeCallbackId: + s = "Callback-Id" + case RADIUSAttributeTypeFramedRoute: + s = "Framed-Route" + case RADIUSAttributeTypeFramedIPXNetwork: + s = "Framed-IPX-Network" + case RADIUSAttributeTypeState: + s = "State" + case RADIUSAttributeTypeClass: + s = "Class" + case RADIUSAttributeTypeVendorSpecific: + s = "Vendor-Specific" + case RADIUSAttributeTypeSessionTimeout: + s = "Session-Timeout" + case RADIUSAttributeTypeIdleTimeout: + s = "Idle-Timeout" + case RADIUSAttributeTypeTerminationAction: + s = "Termination-Action" + case RADIUSAttributeTypeCalledStationId: + s = "Called-Station-Id" + case RADIUSAttributeTypeCallingStationId: + s = "Calling-Station-Id" + case RADIUSAttributeTypeNASIdentifier: + s = "NAS-Identifier" + case RADIUSAttributeTypeProxyState: + s = "Proxy-State" + case RADIUSAttributeTypeLoginLATService: + s = "Login-LAT-Service" + case RADIUSAttributeTypeLoginLATNode: + s = "Login-LAT-Node" + case RADIUSAttributeTypeLoginLATGroup: + s = "Login-LAT-Group" + case RADIUSAttributeTypeFramedAppleTalkLink: + s = "Framed-AppleTalk-Link" + case RADIUSAttributeTypeFramedAppleTalkNetwork: + s = "Framed-AppleTalk-Network" + case RADIUSAttributeTypeFramedAppleTalkZone: + s = "Framed-AppleTalk-Zone" + case RADIUSAttributeTypeAcctStatusType: + s = "Acct-Status-Type" + case RADIUSAttributeTypeAcctDelayTime: + s = "Acct-Delay-Time" + case RADIUSAttributeTypeAcctInputOctets: + s = "Acct-Input-Octets" + case RADIUSAttributeTypeAcctOutputOctets: + s = "Acct-Output-Octets" + case RADIUSAttributeTypeAcctSessionId: + s = "Acct-Session-Id" + case RADIUSAttributeTypeAcctAuthentic: + s = "Acct-Authentic" + case RADIUSAttributeTypeAcctSessionTime: + s = "Acct-Session-Time" + case RADIUSAttributeTypeAcctInputPackets: + s = "Acct-Input-Packets" + case RADIUSAttributeTypeAcctOutputPackets: + s = "Acct-Output-Packets" + case RADIUSAttributeTypeAcctTerminateCause: + s = "Acct-Terminate-Cause" + case RADIUSAttributeTypeAcctMultiSessionId: + s = "Acct-Multi-Session-Id" + case RADIUSAttributeTypeAcctLinkCount: + s = "Acct-Link-Count" + case RADIUSAttributeTypeAcctInputGigawords: + s = "Acct-Input-Gigawords" + case RADIUSAttributeTypeAcctOutputGigawords: + s = "Acct-Output-Gigawords" + case RADIUSAttributeTypeEventTimestamp: + s = "Event-Timestamp" + case RADIUSAttributeTypeCHAPChallenge: + s = "CHAP-Challenge" + case 
RADIUSAttributeTypeNASPortType: + s = "NAS-Port-Type" + case RADIUSAttributeTypePortLimit: + s = "Port-Limit" + case RADIUSAttributeTypeLoginLATPort: + s = "Login-LAT-Port" + case RADIUSAttributeTypeTunnelType: + s = "Tunnel-Type" + case RADIUSAttributeTypeTunnelMediumType: + s = "Tunnel-Medium-Type" + case RADIUSAttributeTypeTunnelClientEndpoint: + s = "Tunnel-Client-Endpoint" + case RADIUSAttributeTypeTunnelServerEndpoint: + s = "Tunnel-Server-Endpoint" + case RADIUSAttributeTypeAcctTunnelConnection: + s = "Acct-Tunnel-Connection" + case RADIUSAttributeTypeTunnelPassword: + s = "Tunnel-Password" + case RADIUSAttributeTypeARAPPassword: + s = "ARAP-Password" + case RADIUSAttributeTypeARAPFeatures: + s = "ARAP-Features" + case RADIUSAttributeTypeARAPZoneAccess: + s = "ARAP-Zone-Access" + case RADIUSAttributeTypeARAPSecurity: + s = "ARAP-Security" + case RADIUSAttributeTypeARAPSecurityData: + s = "ARAP-Security-Data" + case RADIUSAttributeTypePasswordRetry: + s = "Password-Retry" + case RADIUSAttributeTypePrompt: + s = "Prompt" + case RADIUSAttributeTypeConnectInfo: + s = "Connect-Info" + case RADIUSAttributeTypeConfigurationToken: + s = "Configuration-Token" + case RADIUSAttributeTypeEAPMessage: + s = "EAP-Message" + case RADIUSAttributeTypeMessageAuthenticator: + s = "Message-Authenticator" + case RADIUSAttributeTypeTunnelPrivateGroupID: + s = "Tunnel-Private-Group-ID" + case RADIUSAttributeTypeTunnelAssignmentID: + s = "Tunnel-Assignment-ID" + case RADIUSAttributeTypeTunnelPreference: + s = "Tunnel-Preference" + case RADIUSAttributeTypeARAPChallengeResponse: + s = "ARAP-Challenge-Response" + case RADIUSAttributeTypeAcctInterimInterval: + s = "Acct-Interim-Interval" + case RADIUSAttributeTypeAcctTunnelPacketsLost: + s = "Acct-Tunnel-Packets-Lost" + case RADIUSAttributeTypeNASPortId: + s = "NAS-Port-Id" + case RADIUSAttributeTypeFramedPool: + s = "Framed-Pool" + case RADIUSAttributeTypeTunnelClientAuthID: + s = "Tunnel-Client-Auth-ID" + case RADIUSAttributeTypeTunnelServerAuthID: + s = "Tunnel-Server-Auth-ID" + default: + s = fmt.Sprintf("Unknown(%d)", t) + } + return +} + +// Len returns the length of a RADIUS packet. +func (radius *RADIUS) Len() (int, error) { + n := radiusMinimumRecordSizeInBytes + for _, v := range radius.Attributes { + alen, err := attributeValueLength(v.Value) + if err != nil { + return 0, err + } + n += int(alen) + 2 // Added Type and Length + } + return n, nil +} + +// LayerType returns LayerTypeRADIUS. +func (radius *RADIUS) LayerType() gopacket.LayerType { + return LayerTypeRADIUS +} + +// DecodeFromBytes decodes the given bytes into this layer. +func (radius *RADIUS) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) > radiusMaximumRecordSizeInBytes { + df.SetTruncated() + return fmt.Errorf("RADIUS length %d too big", len(data)) + } + + if len(data) < radiusMinimumRecordSizeInBytes { + df.SetTruncated() + return fmt.Errorf("RADIUS length %d too short", len(data)) + } + + radius.BaseLayer = BaseLayer{Contents: data} + + radius.Code = RADIUSCode(data[0]) + radius.Identifier = RADIUSIdentifier(data[1]) + radius.Length = RADIUSLength(binary.BigEndian.Uint16(data[2:4])) + + if int(radius.Length) > radiusMaximumRecordSizeInBytes { + df.SetTruncated() + return fmt.Errorf("RADIUS length %d too big", radius.Length) + } + + if int(radius.Length) < radiusMinimumRecordSizeInBytes { + df.SetTruncated() + return fmt.Errorf("RADIUS length %d too short", radius.Length) + } + + // RFC 2865 3. 
Packet Format + // `If the packet is shorter than the Length field indicates, it MUST be silently discarded.` + if int(radius.Length) > len(data) { + df.SetTruncated() + return fmt.Errorf("RADIUS length %d too big", radius.Length) + } + + // RFC 2865 3. Packet Format + // `Octets outside the range of the Length field MUST be treated as padding and ignored on reception.` + if int(radius.Length) < len(data) { + df.SetTruncated() + data = data[:radius.Length] + } + + copy(radius.Authenticator[:], data[4:20]) + + if len(data) == radiusMinimumRecordSizeInBytes { + return nil + } + + pos := radiusMinimumRecordSizeInBytes + for { + if len(data) == pos { + break + } + + if len(data[pos:]) < radiusAttributesMinimumRecordSizeInBytes { + df.SetTruncated() + return fmt.Errorf("RADIUS attributes length %d too short", len(data[pos:])) + } + + attr := RADIUSAttribute{} + attr.Type = RADIUSAttributeType(data[pos]) + attr.Length = RADIUSAttributeLength(data[pos+1]) + + if int(attr.Length) > len(data[pos:]) { + df.SetTruncated() + return fmt.Errorf("RADIUS attributes length %d too big", attr.Length) + } + + if int(attr.Length) < radiusAttributesMinimumRecordSizeInBytes { + df.SetTruncated() + return fmt.Errorf("RADIUS attributes length %d too short", attr.Length) + } + + if int(attr.Length) > radiusAttributesMinimumRecordSizeInBytes { + attr.Value = make([]byte, attr.Length-2) + copy(attr.Value[:], data[pos+2:pos+int(attr.Length)]) + radius.Attributes = append(radius.Attributes, attr) + } + + pos += int(attr.Length) + } + + for _, v := range radius.Attributes { + if v.Type == RADIUSAttributeTypeEAPMessage { + radius.BaseLayer.Payload = append(radius.BaseLayer.Payload, v.Value...) + } + } + + return nil +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (radius *RADIUS) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + plen, err := radius.Len() + if err != nil { + return err + } + + if opts.FixLengths { + radius.Length = RADIUSLength(plen) + } + + data, err := b.PrependBytes(plen) + if err != nil { + return err + } + + data[0] = byte(radius.Code) + data[1] = byte(radius.Identifier) + binary.BigEndian.PutUint16(data[2:], uint16(radius.Length)) + copy(data[4:20], radius.Authenticator[:]) + + pos := radiusMinimumRecordSizeInBytes + for _, v := range radius.Attributes { + if opts.FixLengths { + v.Length, err = attributeValueLength(v.Value) + if err != nil { + return err + } + } + + data[pos] = byte(v.Type) + data[pos+1] = byte(v.Length) + copy(data[pos+2:], v.Value[:]) + + pos += len(v.Value) + 2 // Added Type and Length + } + + return nil +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (radius *RADIUS) CanDecode() gopacket.LayerClass { + return LayerTypeRADIUS +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (radius *RADIUS) NextLayerType() gopacket.LayerType { + if len(radius.BaseLayer.Payload) > 0 { + return LayerTypeEAP + } else { + return gopacket.LayerTypeZero + } +} + +// Payload returns the EAP Type-Data for EAP-Message attributes. 
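+// For example (illustrative): after DecodeFromBytes has processed an
+// Access-Challenge carrying EAP-Message attributes, Payload() returns their
+// concatenated values and NextLayerType() reports LayerTypeEAP, so the EAP
+// decoder can pick up from here.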
+func (radius *RADIUS) Payload() []byte { + return radius.BaseLayer.Payload +} + +func decodeRADIUS(data []byte, p gopacket.PacketBuilder) error { + radius := &RADIUS{} + err := radius.DecodeFromBytes(data, p) + if err != nil { + return err + } + p.AddLayer(radius) + p.SetApplicationLayer(radius) + next := radius.NextLayerType() + if next == gopacket.LayerTypeZero { + return nil + } + return p.NextDecoder(next) +} + +func attributeValueLength(v []byte) (RADIUSAttributeLength, error) { + n := len(v) + if n > 255 { + return 0, fmt.Errorf("RADIUS attribute value length %d too long", n) + } else { + return RADIUSAttributeLength(n), nil + } +} diff --git a/vendor/github.com/google/gopacket/layers/rmcp.go b/vendor/github.com/google/gopacket/layers/rmcp.go new file mode 100644 index 0000000000..5474fee4ad --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/rmcp.go @@ -0,0 +1,170 @@ +// Copyright 2019 The GoPacket Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file in the root of the source tree. + +package layers + +// This file implements the ASF-RMCP header specified in section 3.2.2.2 of +// https://www.dmtf.org/sites/default/files/standards/documents/DSP0136.pdf + +import ( + "fmt" + + "github.com/google/gopacket" +) + +// RMCPClass is the class of a RMCP layer's payload, e.g. ASF or IPMI. This is a +// 4-bit unsigned int on the wire; all but 6 (ASF), 7 (IPMI) and 8 (OEM-defined) +// are currently reserved. +type RMCPClass uint8 + +// LayerType returns the payload layer type corresponding to a RMCP class. +func (c RMCPClass) LayerType() gopacket.LayerType { + if lt := rmcpClassLayerTypes[uint8(c)]; lt != 0 { + return lt + } + return gopacket.LayerTypePayload +} + +func (c RMCPClass) String() string { + return fmt.Sprintf("%v(%v)", uint8(c), c.LayerType()) +} + +const ( + // RMCPVersion1 identifies RMCP v1.0 in the Version header field. Lower + // values are considered legacy, while higher values are reserved by the + // specification. + RMCPVersion1 uint8 = 0x06 + + // RMCPNormal indicates a "normal" message, i.e. not an acknowledgement. + RMCPNormal uint8 = 0 + + // RMCPAck indicates a message is acknowledging a received normal message. + RMCPAck uint8 = 1 << 7 + + // RMCPClassASF identifies an RMCP message as containing an ASF-RMCP + // payload. + RMCPClassASF RMCPClass = 0x06 + + // RMCPClassIPMI identifies an RMCP message as containing an IPMI payload. + RMCPClassIPMI RMCPClass = 0x07 + + // RMCPClassOEM identifies an RMCP message as containing an OEM-defined + // payload. + RMCPClassOEM RMCPClass = 0x08 +) + +var ( + rmcpClassLayerTypes = [16]gopacket.LayerType{ + RMCPClassASF: LayerTypeASF, + // RMCPClassIPMI is to implement; RMCPClassOEM is deliberately not + // implemented, so we return LayerTypePayload + } +) + +// RegisterRMCPLayerType allows specifying that the payload of a RMCP packet of +// a certain class should processed by the provided layer type. This overrides +// any existing registrations, including defaults. +func RegisterRMCPLayerType(c RMCPClass, l gopacket.LayerType) { + rmcpClassLayerTypes[c] = l +} + +// RMCP describes the format of an RMCP header, which forms a UDP payload. See +// section 3.2.2.2. +type RMCP struct { + BaseLayer + + // Version identifies the version of the RMCP header. 0x06 indicates RMCP + // v1.0; lower values are legacy, higher values are reserved. + Version uint8 + + // Sequence is the sequence number assicated with the message. 
Note that + // this rolls over to 0 after 254, not 255. Seq num 255 indicates the + // receiver must not send an ACK. + Sequence uint8 + + // Ack indicates whether this packet is an acknowledgement. If it is, the + // payload will be empty. + Ack bool + + // Class idicates the structure of the payload. There are only 2^4 valid + // values, however there is no uint4 data type. N.B. the Ack bit has been + // split off into another field. The most significant 4 bits of this field + // will always be 0. + Class RMCPClass +} + +// LayerType returns LayerTypeRMCP. It partially satisfies Layer and +// SerializableLayer. +func (*RMCP) LayerType() gopacket.LayerType { + return LayerTypeRMCP +} + +// CanDecode returns LayerTypeRMCP. It partially satisfies DecodingLayer. +func (r *RMCP) CanDecode() gopacket.LayerClass { + return r.LayerType() +} + +// DecodeFromBytes makes the layer represent the provided bytes. It partially +// satisfies DecodingLayer. +func (r *RMCP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 4 { + df.SetTruncated() + return fmt.Errorf("invalid RMCP header, length %v less than 4", + len(data)) + } + + r.BaseLayer.Contents = data[:4] + r.BaseLayer.Payload = data[4:] + + r.Version = uint8(data[0]) + // 1 byte reserved + r.Sequence = uint8(data[2]) + r.Ack = data[3]&RMCPAck != 0 + r.Class = RMCPClass(data[3] & 0xF) + return nil +} + +// NextLayerType returns the data layer of this RMCP layer. This partially +// satisfies DecodingLayer. +func (r *RMCP) NextLayerType() gopacket.LayerType { + return r.Class.LayerType() +} + +// Payload returns the data layer. It partially satisfies ApplicationLayer. +func (r *RMCP) Payload() []byte { + return r.BaseLayer.Payload +} + +// SerializeTo writes the serialized fom of this layer into the SerializeBuffer, +// partially satisfying SerializableLayer. +func (r *RMCP) SerializeTo(b gopacket.SerializeBuffer, _ gopacket.SerializeOptions) error { + // The IPMI v1.5 spec contains a pad byte for frame sizes of certain lengths + // to work around issues in LAN chips. This is no longer necessary as of + // IPMI v2.0 (renamed to "legacy pad") so we do not attempt to add it. The + // same approach is taken by FreeIPMI: + // http://git.savannah.gnu.org/cgit/freeipmi.git/tree/libfreeipmi/interface/ipmi-lan-interface.c?id=b5ffcd38317daf42074458879f4c55ba6804a595#n836 + bytes, err := b.PrependBytes(4) + if err != nil { + return err + } + bytes[0] = r.Version + bytes[1] = 0x00 + bytes[2] = r.Sequence + bytes[3] = bool2uint8(r.Ack)<<7 | uint8(r.Class) // thanks, BFD layer + return nil +} + +// decodeRMCP decodes the byte slice into an RMCP type, and sets the application +// layer to it. +func decodeRMCP(data []byte, p gopacket.PacketBuilder) error { + rmcp := &RMCP{} + err := rmcp.DecodeFromBytes(data, p) + p.AddLayer(rmcp) + p.SetApplicationLayer(rmcp) + if err != nil { + return err + } + return p.NextDecoder(rmcp.NextLayerType()) +} diff --git a/vendor/github.com/google/gopacket/layers/rudp.go b/vendor/github.com/google/gopacket/layers/rudp.go new file mode 100644 index 0000000000..8435129b94 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/rudp.go @@ -0,0 +1,93 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. 
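
// Editor's illustrative sketch for the RMCP layer defined above (not part of
// the upstream gopacket source): the Ack bit and Class share the final header
// byte, so serializing an ASF acknowledgement puts 0x80|0x06 = 0x86 there.
//
//	r := &RMCP{Version: RMCPVersion1, Sequence: 1, Ack: true, Class: RMCPClassASF}
//	buf := gopacket.NewSerializeBuffer()
//	_ = r.SerializeTo(buf, gopacket.SerializeOptions{})
//	// buf.Bytes() is now []byte{0x06, 0x00, 0x01, 0x86}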
+ +package layers + +import ( + "encoding/binary" + "fmt" + "github.com/google/gopacket" +) + +type RUDP struct { + BaseLayer + SYN, ACK, EACK, RST, NUL bool + Version uint8 + HeaderLength uint8 + SrcPort, DstPort RUDPPort + DataLength uint16 + Seq, Ack, Checksum uint32 + VariableHeaderArea []byte + // RUDPHeaderSyn contains SYN information for the RUDP packet, + // if the SYN flag is set + *RUDPHeaderSYN + // RUDPHeaderEack contains EACK information for the RUDP packet, + // if the EACK flag is set. + *RUDPHeaderEACK +} + +type RUDPHeaderSYN struct { + MaxOutstandingSegments, MaxSegmentSize, OptionFlags uint16 +} + +type RUDPHeaderEACK struct { + SeqsReceivedOK []uint32 +} + +// LayerType returns gopacket.LayerTypeRUDP. +func (r *RUDP) LayerType() gopacket.LayerType { return LayerTypeRUDP } + +func decodeRUDP(data []byte, p gopacket.PacketBuilder) error { + r := &RUDP{ + SYN: data[0]&0x80 != 0, + ACK: data[0]&0x40 != 0, + EACK: data[0]&0x20 != 0, + RST: data[0]&0x10 != 0, + NUL: data[0]&0x08 != 0, + Version: data[0] & 0x3, + HeaderLength: data[1], + SrcPort: RUDPPort(data[2]), + DstPort: RUDPPort(data[3]), + DataLength: binary.BigEndian.Uint16(data[4:6]), + Seq: binary.BigEndian.Uint32(data[6:10]), + Ack: binary.BigEndian.Uint32(data[10:14]), + Checksum: binary.BigEndian.Uint32(data[14:18]), + } + if r.HeaderLength < 9 { + return fmt.Errorf("RUDP packet with too-short header length %d", r.HeaderLength) + } + hlen := int(r.HeaderLength) * 2 + r.Contents = data[:hlen] + r.Payload = data[hlen : hlen+int(r.DataLength)] + r.VariableHeaderArea = data[18:hlen] + headerData := r.VariableHeaderArea + switch { + case r.SYN: + if len(headerData) != 6 { + return fmt.Errorf("RUDP packet invalid SYN header length: %d", len(headerData)) + } + r.RUDPHeaderSYN = &RUDPHeaderSYN{ + MaxOutstandingSegments: binary.BigEndian.Uint16(headerData[:2]), + MaxSegmentSize: binary.BigEndian.Uint16(headerData[2:4]), + OptionFlags: binary.BigEndian.Uint16(headerData[4:6]), + } + case r.EACK: + if len(headerData)%4 != 0 { + return fmt.Errorf("RUDP packet invalid EACK header length: %d", len(headerData)) + } + r.RUDPHeaderEACK = &RUDPHeaderEACK{make([]uint32, len(headerData)/4)} + for i := 0; i < len(headerData); i += 4 { + r.SeqsReceivedOK[i/4] = binary.BigEndian.Uint32(headerData[i : i+4]) + } + } + p.AddLayer(r) + p.SetTransportLayer(r) + return p.NextDecoder(gopacket.LayerTypePayload) +} + +func (r *RUDP) TransportFlow() gopacket.Flow { + return gopacket.NewFlow(EndpointRUDPPort, []byte{byte(r.SrcPort)}, []byte{byte(r.DstPort)}) +} diff --git a/vendor/github.com/google/gopacket/layers/sctp.go b/vendor/github.com/google/gopacket/layers/sctp.go new file mode 100644 index 0000000000..511176e560 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/sctp.go @@ -0,0 +1,746 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "errors" + "fmt" + "hash/crc32" + + "github.com/google/gopacket" +) + +// SCTP contains information on the top level of an SCTP packet. 
+type SCTP struct { + BaseLayer + SrcPort, DstPort SCTPPort + VerificationTag uint32 + Checksum uint32 + sPort, dPort []byte +} + +// LayerType returns gopacket.LayerTypeSCTP +func (s *SCTP) LayerType() gopacket.LayerType { return LayerTypeSCTP } + +func decodeSCTP(data []byte, p gopacket.PacketBuilder) error { + sctp := &SCTP{} + err := sctp.DecodeFromBytes(data, p) + p.AddLayer(sctp) + p.SetTransportLayer(sctp) + if err != nil { + return err + } + return p.NextDecoder(sctpChunkTypePrefixDecoder) +} + +var sctpChunkTypePrefixDecoder = gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix) + +// TransportFlow returns a flow based on the source and destination SCTP port. +func (s *SCTP) TransportFlow() gopacket.Flow { + return gopacket.NewFlow(EndpointSCTPPort, s.sPort, s.dPort) +} + +func decodeWithSCTPChunkTypePrefix(data []byte, p gopacket.PacketBuilder) error { + chunkType := SCTPChunkType(data[0]) + return chunkType.Decode(data, p) +} + +// SerializeTo is for gopacket.SerializableLayer. +func (s SCTP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + bytes, err := b.PrependBytes(12) + if err != nil { + return err + } + binary.BigEndian.PutUint16(bytes[0:2], uint16(s.SrcPort)) + binary.BigEndian.PutUint16(bytes[2:4], uint16(s.DstPort)) + binary.BigEndian.PutUint32(bytes[4:8], s.VerificationTag) + if opts.ComputeChecksums { + // Note: MakeTable(Castagnoli) actually only creates the table once, then + // passes back a singleton on every other call, so this shouldn't cause + // excessive memory allocation. + binary.LittleEndian.PutUint32(bytes[8:12], crc32.Checksum(b.Bytes(), crc32.MakeTable(crc32.Castagnoli))) + } + return nil +} + +func (sctp *SCTP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 12 { + return errors.New("Invalid SCTP common header length") + } + sctp.SrcPort = SCTPPort(binary.BigEndian.Uint16(data[:2])) + sctp.sPort = data[:2] + sctp.DstPort = SCTPPort(binary.BigEndian.Uint16(data[2:4])) + sctp.dPort = data[2:4] + sctp.VerificationTag = binary.BigEndian.Uint32(data[4:8]) + sctp.Checksum = binary.BigEndian.Uint32(data[8:12]) + sctp.BaseLayer = BaseLayer{data[:12], data[12:]} + + return nil +} + +func (t *SCTP) CanDecode() gopacket.LayerClass { + return LayerTypeSCTP +} + +func (t *SCTP) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypePayload +} + +// SCTPChunk contains the common fields in all SCTP chunks. +type SCTPChunk struct { + BaseLayer + Type SCTPChunkType + Flags uint8 + Length uint16 + // ActualLength is the total length of an SCTP chunk, including padding. + // SCTP chunks start and end on 4-byte boundaries. So if a chunk has a length + // of 18, it means that it has data up to and including byte 18, then padding + // up to the next 4-byte boundary, 20. In this case, Length would be 18, and + // ActualLength would be 20. 
+ ActualLength int +} + +func roundUpToNearest4(i int) int { + if i%4 == 0 { + return i + } + return i + 4 - (i % 4) +} + +func decodeSCTPChunk(data []byte) (SCTPChunk, error) { + length := binary.BigEndian.Uint16(data[2:4]) + if length < 4 { + return SCTPChunk{}, errors.New("invalid SCTP chunk length") + } + actual := roundUpToNearest4(int(length)) + ct := SCTPChunkType(data[0]) + + // For SCTP Data, use a separate layer for the payload + delta := 0 + if ct == SCTPChunkTypeData { + delta = int(actual) - int(length) + actual = 16 + } + + return SCTPChunk{ + Type: ct, + Flags: data[1], + Length: length, + ActualLength: actual, + BaseLayer: BaseLayer{data[:actual], data[actual : len(data)-delta]}, + }, nil +} + +// SCTPParameter is a TLV parameter inside a SCTPChunk. +type SCTPParameter struct { + Type uint16 + Length uint16 + ActualLength int + Value []byte +} + +func decodeSCTPParameter(data []byte) SCTPParameter { + length := binary.BigEndian.Uint16(data[2:4]) + return SCTPParameter{ + Type: binary.BigEndian.Uint16(data[0:2]), + Length: length, + Value: data[4:length], + ActualLength: roundUpToNearest4(int(length)), + } +} + +func (p SCTPParameter) Bytes() []byte { + length := 4 + len(p.Value) + data := make([]byte, roundUpToNearest4(length)) + binary.BigEndian.PutUint16(data[0:2], p.Type) + binary.BigEndian.PutUint16(data[2:4], uint16(length)) + copy(data[4:], p.Value) + return data +} + +// SCTPUnknownChunkType is the layer type returned when we don't recognize the +// chunk type. Since there's a length in a known location, we can skip over +// it even if we don't know what it is, and continue parsing the rest of the +// chunks. This chunk is stored as an ErrorLayer in the packet. +type SCTPUnknownChunkType struct { + SCTPChunk + bytes []byte +} + +func decodeSCTPChunkTypeUnknown(data []byte, p gopacket.PacketBuilder) error { + chunk, err := decodeSCTPChunk(data) + if err != nil { + return err + } + sc := &SCTPUnknownChunkType{SCTPChunk: chunk} + sc.bytes = data[:sc.ActualLength] + p.AddLayer(sc) + p.SetErrorLayer(sc) + return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix)) +} + +// SerializeTo is for gopacket.SerializableLayer. +func (s SCTPUnknownChunkType) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + bytes, err := b.PrependBytes(s.ActualLength) + if err != nil { + return err + } + copy(bytes, s.bytes) + return nil +} + +// LayerType returns gopacket.LayerTypeSCTPUnknownChunkType. +func (s *SCTPUnknownChunkType) LayerType() gopacket.LayerType { return LayerTypeSCTPUnknownChunkType } + +// Payload returns all bytes in this header, including the decoded Type, Length, +// and Flags. +func (s *SCTPUnknownChunkType) Payload() []byte { return s.bytes } + +// Error implements ErrorLayer. +func (s *SCTPUnknownChunkType) Error() error { + return fmt.Errorf("No decode method available for SCTP chunk type %s", s.Type) +} + +// SCTPData is the SCTP Data chunk layer. +type SCTPData struct { + SCTPChunk + Unordered, BeginFragment, EndFragment bool + TSN uint32 + StreamId uint16 + StreamSequence uint16 + PayloadProtocol SCTPPayloadProtocol +} + +// LayerType returns gopacket.LayerTypeSCTPData. 
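// Editor's illustrative sketch for SCTPParameter above (not part of the
// upstream gopacket source): Length covers the 4-byte type/length header plus
// the value, and Bytes pads the result to a 4-byte boundary, so a hypothetical
// parameter with a 6-byte value serializes to 12 bytes:
//
//	p := SCTPParameter{Type: 7, Value: []byte{1, 2, 3, 4, 5, 6}}
//	b := p.Bytes() // len(b) == 12, length field == 10, last two bytes are padding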
+func (s *SCTPData) LayerType() gopacket.LayerType { return LayerTypeSCTPData } + +// SCTPPayloadProtocol represents a payload protocol +type SCTPPayloadProtocol uint32 + +// SCTPPayloadProtocol constonts from http://www.iana.org/assignments/sctp-parameters/sctp-parameters.xhtml +const ( + SCTPProtocolReserved SCTPPayloadProtocol = 0 + SCTPPayloadUIA = 1 + SCTPPayloadM2UA = 2 + SCTPPayloadM3UA = 3 + SCTPPayloadSUA = 4 + SCTPPayloadM2PA = 5 + SCTPPayloadV5UA = 6 + SCTPPayloadH248 = 7 + SCTPPayloadBICC = 8 + SCTPPayloadTALI = 9 + SCTPPayloadDUA = 10 + SCTPPayloadASAP = 11 + SCTPPayloadENRP = 12 + SCTPPayloadH323 = 13 + SCTPPayloadQIPC = 14 + SCTPPayloadSIMCO = 15 + SCTPPayloadDDPSegment = 16 + SCTPPayloadDDPStream = 17 + SCTPPayloadS1AP = 18 +) + +func (p SCTPPayloadProtocol) String() string { + switch p { + case SCTPProtocolReserved: + return "Reserved" + case SCTPPayloadUIA: + return "UIA" + case SCTPPayloadM2UA: + return "M2UA" + case SCTPPayloadM3UA: + return "M3UA" + case SCTPPayloadSUA: + return "SUA" + case SCTPPayloadM2PA: + return "M2PA" + case SCTPPayloadV5UA: + return "V5UA" + case SCTPPayloadH248: + return "H.248" + case SCTPPayloadBICC: + return "BICC" + case SCTPPayloadTALI: + return "TALI" + case SCTPPayloadDUA: + return "DUA" + case SCTPPayloadASAP: + return "ASAP" + case SCTPPayloadENRP: + return "ENRP" + case SCTPPayloadH323: + return "H.323" + case SCTPPayloadQIPC: + return "QIPC" + case SCTPPayloadSIMCO: + return "SIMCO" + case SCTPPayloadDDPSegment: + return "DDPSegment" + case SCTPPayloadDDPStream: + return "DDPStream" + case SCTPPayloadS1AP: + return "S1AP" + } + return fmt.Sprintf("Unknown(%d)", p) +} + +func decodeSCTPData(data []byte, p gopacket.PacketBuilder) error { + chunk, err := decodeSCTPChunk(data) + if err != nil { + return err + } + sc := &SCTPData{ + SCTPChunk: chunk, + Unordered: data[1]&0x4 != 0, + BeginFragment: data[1]&0x2 != 0, + EndFragment: data[1]&0x1 != 0, + TSN: binary.BigEndian.Uint32(data[4:8]), + StreamId: binary.BigEndian.Uint16(data[8:10]), + StreamSequence: binary.BigEndian.Uint16(data[10:12]), + PayloadProtocol: SCTPPayloadProtocol(binary.BigEndian.Uint32(data[12:16])), + } + // Length is the length in bytes of the data, INCLUDING the 16-byte header. + p.AddLayer(sc) + return p.NextDecoder(gopacket.LayerTypePayload) +} + +// SerializeTo is for gopacket.SerializableLayer. +func (sc SCTPData) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + payload := b.Bytes() + // Pad the payload to a 32 bit boundary + if rem := len(payload) % 4; rem != 0 { + b.AppendBytes(4 - rem) + } + length := 16 + bytes, err := b.PrependBytes(length) + if err != nil { + return err + } + bytes[0] = uint8(sc.Type) + flags := uint8(0) + if sc.Unordered { + flags |= 0x4 + } + if sc.BeginFragment { + flags |= 0x2 + } + if sc.EndFragment { + flags |= 0x1 + } + bytes[1] = flags + binary.BigEndian.PutUint16(bytes[2:4], uint16(length+len(payload))) + binary.BigEndian.PutUint32(bytes[4:8], sc.TSN) + binary.BigEndian.PutUint16(bytes[8:10], sc.StreamId) + binary.BigEndian.PutUint16(bytes[10:12], sc.StreamSequence) + binary.BigEndian.PutUint32(bytes[12:16], uint32(sc.PayloadProtocol)) + return nil +} + +// SCTPInitParameter is a parameter for an SCTP Init or InitAck packet. +type SCTPInitParameter SCTPParameter + +// SCTPInit is used as the return value for both SCTPInit and SCTPInitAck +// messages. 
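// Editor's usage sketch (not part of the upstream gopacket source): each SCTP
// chunk decodes to its own layer, so a packet can be walked with a type switch
// over pkt.Layers(). Here raw is a hypothetical slice holding an SCTP packet.
//
//	pkt := gopacket.NewPacket(raw, LayerTypeSCTP, gopacket.Default)
//	for _, l := range pkt.Layers() {
//		switch c := l.(type) {
//		case *SCTPData:
//			fmt.Println("DATA tsn", c.TSN, "stream", c.StreamId, "proto", c.PayloadProtocol)
//		case *SCTPInit:
//			fmt.Println(c.LayerType(), "tag", c.InitiateTag)
//		}
//	}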
+type SCTPInit struct { + SCTPChunk + InitiateTag uint32 + AdvertisedReceiverWindowCredit uint32 + OutboundStreams, InboundStreams uint16 + InitialTSN uint32 + Parameters []SCTPInitParameter +} + +// LayerType returns either gopacket.LayerTypeSCTPInit or gopacket.LayerTypeSCTPInitAck. +func (sc *SCTPInit) LayerType() gopacket.LayerType { + if sc.Type == SCTPChunkTypeInitAck { + return LayerTypeSCTPInitAck + } + // sc.Type == SCTPChunkTypeInit + return LayerTypeSCTPInit +} + +func decodeSCTPInit(data []byte, p gopacket.PacketBuilder) error { + chunk, err := decodeSCTPChunk(data) + if err != nil { + return err + } + sc := &SCTPInit{ + SCTPChunk: chunk, + InitiateTag: binary.BigEndian.Uint32(data[4:8]), + AdvertisedReceiverWindowCredit: binary.BigEndian.Uint32(data[8:12]), + OutboundStreams: binary.BigEndian.Uint16(data[12:14]), + InboundStreams: binary.BigEndian.Uint16(data[14:16]), + InitialTSN: binary.BigEndian.Uint32(data[16:20]), + } + paramData := data[20:sc.ActualLength] + for len(paramData) > 0 { + p := SCTPInitParameter(decodeSCTPParameter(paramData)) + paramData = paramData[p.ActualLength:] + sc.Parameters = append(sc.Parameters, p) + } + p.AddLayer(sc) + return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix)) +} + +// SerializeTo is for gopacket.SerializableLayer. +func (sc SCTPInit) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + var payload []byte + for _, param := range sc.Parameters { + payload = append(payload, SCTPParameter(param).Bytes()...) + } + length := 20 + len(payload) + bytes, err := b.PrependBytes(roundUpToNearest4(length)) + if err != nil { + return err + } + bytes[0] = uint8(sc.Type) + bytes[1] = sc.Flags + binary.BigEndian.PutUint16(bytes[2:4], uint16(length)) + binary.BigEndian.PutUint32(bytes[4:8], sc.InitiateTag) + binary.BigEndian.PutUint32(bytes[8:12], sc.AdvertisedReceiverWindowCredit) + binary.BigEndian.PutUint16(bytes[12:14], sc.OutboundStreams) + binary.BigEndian.PutUint16(bytes[14:16], sc.InboundStreams) + binary.BigEndian.PutUint32(bytes[16:20], sc.InitialTSN) + copy(bytes[20:], payload) + return nil +} + +// SCTPSack is the SCTP Selective ACK chunk layer. +type SCTPSack struct { + SCTPChunk + CumulativeTSNAck uint32 + AdvertisedReceiverWindowCredit uint32 + NumGapACKs, NumDuplicateTSNs uint16 + GapACKs []uint16 + DuplicateTSNs []uint32 +} + +// LayerType return LayerTypeSCTPSack +func (sc *SCTPSack) LayerType() gopacket.LayerType { + return LayerTypeSCTPSack +} + +func decodeSCTPSack(data []byte, p gopacket.PacketBuilder) error { + chunk, err := decodeSCTPChunk(data) + if err != nil { + return err + } + sc := &SCTPSack{ + SCTPChunk: chunk, + CumulativeTSNAck: binary.BigEndian.Uint32(data[4:8]), + AdvertisedReceiverWindowCredit: binary.BigEndian.Uint32(data[8:12]), + NumGapACKs: binary.BigEndian.Uint16(data[12:14]), + NumDuplicateTSNs: binary.BigEndian.Uint16(data[14:16]), + } + // We maximize gapAcks and dupTSNs here so we're not allocating tons + // of memory based on a user-controlable field. Our maximums are not exact, + // but should give us sane defaults... 
we'll still hit slice boundaries and + // fail if the user-supplied values are too high (in the for loops below), but + // the amount of memory we'll have allocated because of that should be small + // (< sc.ActualLength) + gapAcks := sc.SCTPChunk.ActualLength / 2 + dupTSNs := (sc.SCTPChunk.ActualLength - gapAcks*2) / 4 + if gapAcks > int(sc.NumGapACKs) { + gapAcks = int(sc.NumGapACKs) + } + if dupTSNs > int(sc.NumDuplicateTSNs) { + dupTSNs = int(sc.NumDuplicateTSNs) + } + sc.GapACKs = make([]uint16, 0, gapAcks) + sc.DuplicateTSNs = make([]uint32, 0, dupTSNs) + bytesRemaining := data[16:] + for i := 0; i < int(sc.NumGapACKs); i++ { + sc.GapACKs = append(sc.GapACKs, binary.BigEndian.Uint16(bytesRemaining[:2])) + bytesRemaining = bytesRemaining[2:] + } + for i := 0; i < int(sc.NumDuplicateTSNs); i++ { + sc.DuplicateTSNs = append(sc.DuplicateTSNs, binary.BigEndian.Uint32(bytesRemaining[:4])) + bytesRemaining = bytesRemaining[4:] + } + p.AddLayer(sc) + return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix)) +} + +// SerializeTo is for gopacket.SerializableLayer. +func (sc SCTPSack) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + length := 16 + 2*len(sc.GapACKs) + 4*len(sc.DuplicateTSNs) + bytes, err := b.PrependBytes(roundUpToNearest4(length)) + if err != nil { + return err + } + bytes[0] = uint8(sc.Type) + bytes[1] = sc.Flags + binary.BigEndian.PutUint16(bytes[2:4], uint16(length)) + binary.BigEndian.PutUint32(bytes[4:8], sc.CumulativeTSNAck) + binary.BigEndian.PutUint32(bytes[8:12], sc.AdvertisedReceiverWindowCredit) + binary.BigEndian.PutUint16(bytes[12:14], uint16(len(sc.GapACKs))) + binary.BigEndian.PutUint16(bytes[14:16], uint16(len(sc.DuplicateTSNs))) + for i, v := range sc.GapACKs { + binary.BigEndian.PutUint16(bytes[16+i*2:], v) + } + offset := 16 + 2*len(sc.GapACKs) + for i, v := range sc.DuplicateTSNs { + binary.BigEndian.PutUint32(bytes[offset+i*4:], v) + } + return nil +} + +// SCTPHeartbeatParameter is the parameter type used by SCTP heartbeat and +// heartbeat ack layers. +type SCTPHeartbeatParameter SCTPParameter + +// SCTPHeartbeat is the SCTP heartbeat layer, also used for heatbeat ack. +type SCTPHeartbeat struct { + SCTPChunk + Parameters []SCTPHeartbeatParameter +} + +// LayerType returns gopacket.LayerTypeSCTPHeartbeat. +func (sc *SCTPHeartbeat) LayerType() gopacket.LayerType { + if sc.Type == SCTPChunkTypeHeartbeatAck { + return LayerTypeSCTPHeartbeatAck + } + // sc.Type == SCTPChunkTypeHeartbeat + return LayerTypeSCTPHeartbeat +} + +func decodeSCTPHeartbeat(data []byte, p gopacket.PacketBuilder) error { + chunk, err := decodeSCTPChunk(data) + if err != nil { + return err + } + sc := &SCTPHeartbeat{ + SCTPChunk: chunk, + } + paramData := data[4:sc.Length] + for len(paramData) > 0 { + p := SCTPHeartbeatParameter(decodeSCTPParameter(paramData)) + paramData = paramData[p.ActualLength:] + sc.Parameters = append(sc.Parameters, p) + } + p.AddLayer(sc) + return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix)) +} + +// SerializeTo is for gopacket.SerializableLayer. +func (sc SCTPHeartbeat) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + var payload []byte + for _, param := range sc.Parameters { + payload = append(payload, SCTPParameter(param).Bytes()...) 
+ } + length := 4 + len(payload) + + bytes, err := b.PrependBytes(roundUpToNearest4(length)) + if err != nil { + return err + } + bytes[0] = uint8(sc.Type) + bytes[1] = sc.Flags + binary.BigEndian.PutUint16(bytes[2:4], uint16(length)) + copy(bytes[4:], payload) + return nil +} + +// SCTPErrorParameter is the parameter type used by SCTP Abort and Error layers. +type SCTPErrorParameter SCTPParameter + +// SCTPError is the SCTP error layer, also used for SCTP aborts. +type SCTPError struct { + SCTPChunk + Parameters []SCTPErrorParameter +} + +// LayerType returns LayerTypeSCTPAbort or LayerTypeSCTPError. +func (sc *SCTPError) LayerType() gopacket.LayerType { + if sc.Type == SCTPChunkTypeAbort { + return LayerTypeSCTPAbort + } + // sc.Type == SCTPChunkTypeError + return LayerTypeSCTPError +} + +func decodeSCTPError(data []byte, p gopacket.PacketBuilder) error { + // remarkably similar to decodeSCTPHeartbeat ;) + chunk, err := decodeSCTPChunk(data) + if err != nil { + return err + } + sc := &SCTPError{ + SCTPChunk: chunk, + } + paramData := data[4:sc.Length] + for len(paramData) > 0 { + p := SCTPErrorParameter(decodeSCTPParameter(paramData)) + paramData = paramData[p.ActualLength:] + sc.Parameters = append(sc.Parameters, p) + } + p.AddLayer(sc) + return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix)) +} + +// SerializeTo is for gopacket.SerializableLayer. +func (sc SCTPError) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + var payload []byte + for _, param := range sc.Parameters { + payload = append(payload, SCTPParameter(param).Bytes()...) + } + length := 4 + len(payload) + + bytes, err := b.PrependBytes(roundUpToNearest4(length)) + if err != nil { + return err + } + bytes[0] = uint8(sc.Type) + bytes[1] = sc.Flags + binary.BigEndian.PutUint16(bytes[2:4], uint16(length)) + copy(bytes[4:], payload) + return nil +} + +// SCTPShutdown is the SCTP shutdown layer. +type SCTPShutdown struct { + SCTPChunk + CumulativeTSNAck uint32 +} + +// LayerType returns gopacket.LayerTypeSCTPShutdown. +func (sc *SCTPShutdown) LayerType() gopacket.LayerType { return LayerTypeSCTPShutdown } + +func decodeSCTPShutdown(data []byte, p gopacket.PacketBuilder) error { + chunk, err := decodeSCTPChunk(data) + if err != nil { + return err + } + sc := &SCTPShutdown{ + SCTPChunk: chunk, + CumulativeTSNAck: binary.BigEndian.Uint32(data[4:8]), + } + p.AddLayer(sc) + return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix)) +} + +// SerializeTo is for gopacket.SerializableLayer. +func (sc SCTPShutdown) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + bytes, err := b.PrependBytes(8) + if err != nil { + return err + } + bytes[0] = uint8(sc.Type) + bytes[1] = sc.Flags + binary.BigEndian.PutUint16(bytes[2:4], 8) + binary.BigEndian.PutUint32(bytes[4:8], sc.CumulativeTSNAck) + return nil +} + +// SCTPShutdownAck is the SCTP shutdown layer. +type SCTPShutdownAck struct { + SCTPChunk +} + +// LayerType returns gopacket.LayerTypeSCTPShutdownAck. +func (sc *SCTPShutdownAck) LayerType() gopacket.LayerType { return LayerTypeSCTPShutdownAck } + +func decodeSCTPShutdownAck(data []byte, p gopacket.PacketBuilder) error { + chunk, err := decodeSCTPChunk(data) + if err != nil { + return err + } + sc := &SCTPShutdownAck{ + SCTPChunk: chunk, + } + p.AddLayer(sc) + return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix)) +} + +// SerializeTo is for gopacket.SerializableLayer. 
+func (sc SCTPShutdownAck) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + bytes, err := b.PrependBytes(4) + if err != nil { + return err + } + bytes[0] = uint8(sc.Type) + bytes[1] = sc.Flags + binary.BigEndian.PutUint16(bytes[2:4], 4) + return nil +} + +// SCTPCookieEcho is the SCTP Cookie Echo layer. +type SCTPCookieEcho struct { + SCTPChunk + Cookie []byte +} + +// LayerType returns gopacket.LayerTypeSCTPCookieEcho. +func (sc *SCTPCookieEcho) LayerType() gopacket.LayerType { return LayerTypeSCTPCookieEcho } + +func decodeSCTPCookieEcho(data []byte, p gopacket.PacketBuilder) error { + chunk, err := decodeSCTPChunk(data) + if err != nil { + return err + } + sc := &SCTPCookieEcho{ + SCTPChunk: chunk, + } + sc.Cookie = data[4:sc.Length] + p.AddLayer(sc) + return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix)) +} + +// SerializeTo is for gopacket.SerializableLayer. +func (sc SCTPCookieEcho) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + length := 4 + len(sc.Cookie) + bytes, err := b.PrependBytes(roundUpToNearest4(length)) + if err != nil { + return err + } + bytes[0] = uint8(sc.Type) + bytes[1] = sc.Flags + binary.BigEndian.PutUint16(bytes[2:4], uint16(length)) + copy(bytes[4:], sc.Cookie) + return nil +} + +// This struct is used by all empty SCTP chunks (currently CookieAck and +// ShutdownComplete). +type SCTPEmptyLayer struct { + SCTPChunk +} + +// LayerType returns either gopacket.LayerTypeSCTPShutdownComplete or +// LayerTypeSCTPCookieAck. +func (sc *SCTPEmptyLayer) LayerType() gopacket.LayerType { + if sc.Type == SCTPChunkTypeShutdownComplete { + return LayerTypeSCTPShutdownComplete + } + // sc.Type == SCTPChunkTypeCookieAck + return LayerTypeSCTPCookieAck +} + +func decodeSCTPEmptyLayer(data []byte, p gopacket.PacketBuilder) error { + chunk, err := decodeSCTPChunk(data) + if err != nil { + return err + } + sc := &SCTPEmptyLayer{ + SCTPChunk: chunk, + } + p.AddLayer(sc) + return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix)) +} + +// SerializeTo is for gopacket.SerializableLayer. +func (sc SCTPEmptyLayer) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + bytes, err := b.PrependBytes(4) + if err != nil { + return err + } + bytes[0] = uint8(sc.Type) + bytes[1] = sc.Flags + binary.BigEndian.PutUint16(bytes[2:4], 4) + return nil +} diff --git a/vendor/github.com/google/gopacket/layers/sflow.go b/vendor/github.com/google/gopacket/layers/sflow.go new file mode 100644 index 0000000000..bc1c9733ba --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/sflow.go @@ -0,0 +1,2567 @@ +// Copyright 2014 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +/* +This layer decodes SFlow version 5 datagrams. + +The specification can be found here: http://sflow.org/sflow_version_5.txt + +Additional developer information about sflow can be found at: +http://sflow.org/developers/specifications.php + +And SFlow in general: +http://sflow.org/index.php + +Two forms of sample data are defined: compact and expanded. The +Specification has this to say: + + Compact and expand forms of counter and flow samples are defined. + An agent must not mix compact/expanded encodings. If an agent + will never use ifIndex numbers >= 2^24 then it must use compact + encodings for all interfaces. 
Otherwise the expanded formats must + be used for all interfaces. + +This decoder only supports the compact form, because that is the only +one for which data was available. + +The datagram is composed of one or more samples of type flow or counter, +and each sample is composed of one or more records describing the sample. +A sample is a single instance of sampled inforamtion, and each record in +the sample gives additional / supplimentary information about the sample. + +The following sample record types are supported: + + Raw Packet Header + opaque = flow_data; enterprise = 0; format = 1 + + Extended Switch Data + opaque = flow_data; enterprise = 0; format = 1001 + + Extended Router Data + opaque = flow_data; enterprise = 0; format = 1002 + + Extended Gateway Data + opaque = flow_data; enterprise = 0; format = 1003 + + Extended User Data + opaque = flow_data; enterprise = 0; format = 1004 + + Extended URL Data + opaque = flow_data; enterprise = 0; format = 1005 + +The following types of counter records are supported: + + Generic Interface Counters - see RFC 2233 + opaque = counter_data; enterprise = 0; format = 1 + + Ethernet Interface Counters - see RFC 2358 + opaque = counter_data; enterprise = 0; format = 2 + +SFlow is encoded using XDR (RFC4506). There are a few places +where the standard 4-byte fields are partitioned into two +bitfields of different lengths. I'm not sure why the designers +chose to pack together two values like this in some places, and +in others they use the entire 4-byte value to store a number that +will never be more than a few bits. In any case, there are a couple +of types defined to handle the decoding of these bitfields, and +that's why they're there. */ + +package layers + +import ( + "encoding/binary" + "errors" + "fmt" + "net" + + "github.com/google/gopacket" +) + +// SFlowRecord holds both flow sample records and counter sample records. +// A Record is the structure that actually holds the sampled data +// and / or counters. +type SFlowRecord interface { +} + +// SFlowDataSource encodes a 2-bit SFlowSourceFormat in its most significant +// 2 bits, and an SFlowSourceValue in its least significant 30 bits. +// These types and values define the meaning of the inteface information +// presented in the sample metadata. 
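// Editor's illustrative sketch (not part of the upstream gopacket source): a
// worked decode of the 2-bit format / 30-bit value split described above.
//
//	format, value := SFlowDataSource(0x40000007).decode()
//	// format == SFlowTypePacketDiscarded (1), value == 7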
+type SFlowDataSource int32 + +func (sdc SFlowDataSource) decode() (SFlowSourceFormat, SFlowSourceValue) { + leftField := sdc >> 30 + rightField := uint32(0x3FFFFFFF) & uint32(sdc) + return SFlowSourceFormat(leftField), SFlowSourceValue(rightField) +} + +type SFlowDataSourceExpanded struct { + SourceIDClass SFlowSourceFormat + SourceIDIndex SFlowSourceValue +} + +func (sdce SFlowDataSourceExpanded) decode() (SFlowSourceFormat, SFlowSourceValue) { + leftField := sdce.SourceIDClass >> 30 + rightField := uint32(0x3FFFFFFF) & uint32(sdce.SourceIDIndex) + return SFlowSourceFormat(leftField), SFlowSourceValue(rightField) +} + +type SFlowSourceFormat uint32 + +type SFlowSourceValue uint32 + +const ( + SFlowTypeSingleInterface SFlowSourceFormat = 0 + SFlowTypePacketDiscarded SFlowSourceFormat = 1 + SFlowTypeMultipleDestinations SFlowSourceFormat = 2 +) + +func (sdf SFlowSourceFormat) String() string { + switch sdf { + case SFlowTypeSingleInterface: + return "Single Interface" + case SFlowTypePacketDiscarded: + return "Packet Discarded" + case SFlowTypeMultipleDestinations: + return "Multiple Destinations" + default: + return "UNKNOWN" + } +} + +func decodeSFlow(data []byte, p gopacket.PacketBuilder) error { + s := &SFlowDatagram{} + err := s.DecodeFromBytes(data, p) + if err != nil { + return err + } + p.AddLayer(s) + p.SetApplicationLayer(s) + return nil +} + +// SFlowDatagram is the outermost container which holds some basic information +// about the reporting agent, and holds at least one sample record +type SFlowDatagram struct { + BaseLayer + + DatagramVersion uint32 + AgentAddress net.IP + SubAgentID uint32 + SequenceNumber uint32 + AgentUptime uint32 + SampleCount uint32 + FlowSamples []SFlowFlowSample + CounterSamples []SFlowCounterSample +} + +// An SFlow datagram's outer container has the following +// structure: + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | int sFlow version (2|4|5) | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | int IP version of the Agent (1=v4|2=v6) | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// / Agent IP address (v4=4byte|v6=16byte) / +// / / +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | int sub agent id | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | int datagram sequence number | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | int switch uptime in ms | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | int n samples in datagram | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// / n samples / +// / / +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + +// SFlowDataFormat encodes the EnterpriseID in the most +// significant 12 bits, and the SampleType in the least significant +// 20 bits. +type SFlowDataFormat uint32 + +func (sdf SFlowDataFormat) decode() (SFlowEnterpriseID, SFlowSampleType) { + leftField := sdf >> 12 + rightField := uint32(0xFFF) & uint32(sdf) + return SFlowEnterpriseID(leftField), SFlowSampleType(rightField) +} + +// SFlowEnterpriseID is used to differentiate between the +// official SFlow standard, and other, vendor-specific +// types of flow data. (Similiar to SNMP's enterprise MIB +// OIDs) Only the office SFlow Enterprise ID is decoded +// here. 
+type SFlowEnterpriseID uint32 + +const ( + SFlowStandard SFlowEnterpriseID = 0 +) + +func (eid SFlowEnterpriseID) String() string { + switch eid { + case SFlowStandard: + return "Standard SFlow" + default: + return "" + } +} + +func (eid SFlowEnterpriseID) GetType() SFlowEnterpriseID { + return SFlowStandard +} + +// SFlowSampleType specifies the type of sample. Only flow samples +// and counter samples are supported +type SFlowSampleType uint32 + +const ( + SFlowTypeFlowSample SFlowSampleType = 1 + SFlowTypeCounterSample SFlowSampleType = 2 + SFlowTypeExpandedFlowSample SFlowSampleType = 3 + SFlowTypeExpandedCounterSample SFlowSampleType = 4 +) + +func (st SFlowSampleType) GetType() SFlowSampleType { + switch st { + case SFlowTypeFlowSample: + return SFlowTypeFlowSample + case SFlowTypeCounterSample: + return SFlowTypeCounterSample + case SFlowTypeExpandedFlowSample: + return SFlowTypeExpandedFlowSample + case SFlowTypeExpandedCounterSample: + return SFlowTypeExpandedCounterSample + default: + panic("Invalid Sample Type") + } +} + +func (st SFlowSampleType) String() string { + switch st { + case SFlowTypeFlowSample: + return "Flow Sample" + case SFlowTypeCounterSample: + return "Counter Sample" + case SFlowTypeExpandedFlowSample: + return "Expanded Flow Sample" + case SFlowTypeExpandedCounterSample: + return "Expanded Counter Sample" + default: + return "" + } +} + +func (s *SFlowDatagram) LayerType() gopacket.LayerType { return LayerTypeSFlow } + +func (d *SFlowDatagram) Payload() []byte { return nil } + +func (d *SFlowDatagram) CanDecode() gopacket.LayerClass { return LayerTypeSFlow } + +func (d *SFlowDatagram) NextLayerType() gopacket.LayerType { return gopacket.LayerTypePayload } + +// SFlowIPType determines what form the IP address being decoded will +// take. 
This is an XDR union type allowing for both IPv4 and IPv6 +type SFlowIPType uint32 + +const ( + SFlowIPv4 SFlowIPType = 1 + SFlowIPv6 SFlowIPType = 2 +) + +func (s SFlowIPType) String() string { + switch s { + case SFlowIPv4: + return "IPv4" + case SFlowIPv6: + return "IPv6" + default: + return "" + } +} + +func (s SFlowIPType) Length() int { + switch s { + case SFlowIPv4: + return 4 + case SFlowIPv6: + return 16 + default: + return 0 + } +} + +func (s *SFlowDatagram) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + var agentAddressType SFlowIPType + + data, s.DatagramVersion = data[4:], binary.BigEndian.Uint32(data[:4]) + data, agentAddressType = data[4:], SFlowIPType(binary.BigEndian.Uint32(data[:4])) + data, s.AgentAddress = data[agentAddressType.Length():], data[:agentAddressType.Length()] + data, s.SubAgentID = data[4:], binary.BigEndian.Uint32(data[:4]) + data, s.SequenceNumber = data[4:], binary.BigEndian.Uint32(data[:4]) + data, s.AgentUptime = data[4:], binary.BigEndian.Uint32(data[:4]) + data, s.SampleCount = data[4:], binary.BigEndian.Uint32(data[:4]) + + if s.SampleCount < 1 { + return fmt.Errorf("SFlow Datagram has invalid sample length: %d", s.SampleCount) + } + for i := uint32(0); i < s.SampleCount; i++ { + sdf := SFlowDataFormat(binary.BigEndian.Uint32(data[:4])) + _, sampleType := sdf.decode() + switch sampleType { + case SFlowTypeFlowSample: + if flowSample, err := decodeFlowSample(&data, false); err == nil { + s.FlowSamples = append(s.FlowSamples, flowSample) + } else { + return err + } + case SFlowTypeCounterSample: + if counterSample, err := decodeCounterSample(&data, false); err == nil { + s.CounterSamples = append(s.CounterSamples, counterSample) + } else { + return err + } + case SFlowTypeExpandedFlowSample: + if flowSample, err := decodeFlowSample(&data, true); err == nil { + s.FlowSamples = append(s.FlowSamples, flowSample) + } else { + return err + } + case SFlowTypeExpandedCounterSample: + if counterSample, err := decodeCounterSample(&data, true); err == nil { + s.CounterSamples = append(s.CounterSamples, counterSample) + } else { + return err + } + + default: + return fmt.Errorf("Unsupported SFlow sample type %d", sampleType) + } + } + return nil +} + +// SFlowFlowSample represents a sampled packet and contains +// one or more records describing the packet +type SFlowFlowSample struct { + EnterpriseID SFlowEnterpriseID + Format SFlowSampleType + SampleLength uint32 + SequenceNumber uint32 + SourceIDClass SFlowSourceFormat + SourceIDIndex SFlowSourceValue + SamplingRate uint32 + SamplePool uint32 + Dropped uint32 + InputInterfaceFormat uint32 + InputInterface uint32 + OutputInterfaceFormat uint32 + OutputInterface uint32 + RecordCount uint32 + Records []SFlowRecord +} + +// Flow samples have the following structure. 
Note +// the bit fields to encode the Enterprise ID and the +// Flow record format: type 1 + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | 20 bit Interprise (0) |12 bit format | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | sample length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | int sample sequence number | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// |id type | src id index value | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | int sampling rate | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | int sample pool | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | int drops | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | int input ifIndex | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | int output ifIndex | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | int number of records | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// / flow records / +// / / +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + +// Flow samples have the following structure. +// Flow record format: type 3 + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | 20 bit Interprise (0) |12 bit format | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | sample length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | int sample sequence number | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | int src id type | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | int src id index value | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | int sampling rate | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | int sample pool | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | int drops | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | int input interface format | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | int input interface value | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | int output interface format | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | int output interface value | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | int number of records | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// / flow records / +// / / +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + +type SFlowFlowDataFormat uint32 + +func (fdf SFlowFlowDataFormat) decode() (SFlowEnterpriseID, SFlowFlowRecordType) { + leftField := fdf >> 12 + rightField := uint32(0xFFF) & uint32(fdf) + return SFlowEnterpriseID(leftField), SFlowFlowRecordType(rightField) +} + +func (fs SFlowFlowSample) GetRecords() []SFlowRecord { + return fs.Records +} + +func (fs SFlowFlowSample) GetType() SFlowSampleType { + return SFlowTypeFlowSample +} + +func skipRecord(data *[]byte) { + recordLength := int(binary.BigEndian.Uint32((*data)[4:])) + *data = (*data)[(recordLength+((4-recordLength)%4))+8:] +} + +func decodeFlowSample(data *[]byte, expanded bool) (SFlowFlowSample, error) { + s := SFlowFlowSample{} + var sdf SFlowDataFormat + *data, sdf = (*data)[4:], SFlowDataFormat(binary.BigEndian.Uint32((*data)[:4])) + var sdc SFlowDataSource + + s.EnterpriseID, s.Format = sdf.decode() + if len(*data) < 4 { + return SFlowFlowSample{}, errors.New("ethernet counters too small") + } + *data, s.SampleLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + if len(*data) < 4 { + return SFlowFlowSample{}, errors.New("ethernet counters too small") + } + *data, 
s.SequenceNumber = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + if expanded { + if len(*data) < 4 { + return SFlowFlowSample{}, errors.New("ethernet counters too small") + } + *data, s.SourceIDClass = (*data)[4:], SFlowSourceFormat(binary.BigEndian.Uint32((*data)[:4])) + if len(*data) < 4 { + return SFlowFlowSample{}, errors.New("ethernet counters too small") + } + *data, s.SourceIDIndex = (*data)[4:], SFlowSourceValue(binary.BigEndian.Uint32((*data)[:4])) + } else { + if len(*data) < 4 { + return SFlowFlowSample{}, errors.New("ethernet counters too small") + } + *data, sdc = (*data)[4:], SFlowDataSource(binary.BigEndian.Uint32((*data)[:4])) + s.SourceIDClass, s.SourceIDIndex = sdc.decode() + } + if len(*data) < 4 { + return SFlowFlowSample{}, errors.New("ethernet counters too small") + } + *data, s.SamplingRate = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + if len(*data) < 4 { + return SFlowFlowSample{}, errors.New("ethernet counters too small") + } + *data, s.SamplePool = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + if len(*data) < 4 { + return SFlowFlowSample{}, errors.New("ethernet counters too small") + } + *data, s.Dropped = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + + if expanded { + if len(*data) < 4 { + return SFlowFlowSample{}, errors.New("ethernet counters too small") + } + *data, s.InputInterfaceFormat = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + if len(*data) < 4 { + return SFlowFlowSample{}, errors.New("ethernet counters too small") + } + *data, s.InputInterface = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + if len(*data) < 4 { + return SFlowFlowSample{}, errors.New("ethernet counters too small") + } + *data, s.OutputInterfaceFormat = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + if len(*data) < 4 { + return SFlowFlowSample{}, errors.New("ethernet counters too small") + } + *data, s.OutputInterface = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + } else { + if len(*data) < 4 { + return SFlowFlowSample{}, errors.New("ethernet counters too small") + } + *data, s.InputInterface = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + if len(*data) < 4 { + return SFlowFlowSample{}, errors.New("ethernet counters too small") + } + *data, s.OutputInterface = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + } + if len(*data) < 4 { + return SFlowFlowSample{}, errors.New("ethernet counters too small") + } + *data, s.RecordCount = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + + for i := uint32(0); i < s.RecordCount; i++ { + rdf := SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4])) + enterpriseID, flowRecordType := rdf.decode() + + // Try to decode when EnterpriseID is 0 signaling + // default sflow structs are used according specification + // Unexpected behavior detected for e.g. 
with pmacct + if enterpriseID == 0 { + switch flowRecordType { + case SFlowTypeRawPacketFlow: + if record, err := decodeRawPacketFlowRecord(data); err == nil { + s.Records = append(s.Records, record) + } else { + return s, err + } + case SFlowTypeExtendedUserFlow: + if record, err := decodeExtendedUserFlow(data); err == nil { + s.Records = append(s.Records, record) + } else { + return s, err + } + case SFlowTypeExtendedUrlFlow: + if record, err := decodeExtendedURLRecord(data); err == nil { + s.Records = append(s.Records, record) + } else { + return s, err + } + case SFlowTypeExtendedSwitchFlow: + if record, err := decodeExtendedSwitchFlowRecord(data); err == nil { + s.Records = append(s.Records, record) + } else { + return s, err + } + case SFlowTypeExtendedRouterFlow: + if record, err := decodeExtendedRouterFlowRecord(data); err == nil { + s.Records = append(s.Records, record) + } else { + return s, err + } + case SFlowTypeExtendedGatewayFlow: + if record, err := decodeExtendedGatewayFlowRecord(data); err == nil { + s.Records = append(s.Records, record) + } else { + return s, err + } + case SFlowTypeEthernetFrameFlow: + if record, err := decodeEthernetFrameFlowRecord(data); err == nil { + s.Records = append(s.Records, record) + } else { + return s, err + } + case SFlowTypeIpv4Flow: + if record, err := decodeSFlowIpv4Record(data); err == nil { + s.Records = append(s.Records, record) + } else { + return s, err + } + case SFlowTypeIpv6Flow: + if record, err := decodeSFlowIpv6Record(data); err == nil { + s.Records = append(s.Records, record) + } else { + return s, err + } + case SFlowTypeExtendedMlpsFlow: + // TODO + skipRecord(data) + return s, errors.New("skipping TypeExtendedMlpsFlow") + case SFlowTypeExtendedNatFlow: + // TODO + skipRecord(data) + return s, errors.New("skipping TypeExtendedNatFlow") + case SFlowTypeExtendedMlpsTunnelFlow: + // TODO + skipRecord(data) + return s, errors.New("skipping TypeExtendedMlpsTunnelFlow") + case SFlowTypeExtendedMlpsVcFlow: + // TODO + skipRecord(data) + return s, errors.New("skipping TypeExtendedMlpsVcFlow") + case SFlowTypeExtendedMlpsFecFlow: + // TODO + skipRecord(data) + return s, errors.New("skipping TypeExtendedMlpsFecFlow") + case SFlowTypeExtendedMlpsLvpFecFlow: + // TODO + skipRecord(data) + return s, errors.New("skipping TypeExtendedMlpsLvpFecFlow") + case SFlowTypeExtendedVlanFlow: + // TODO + skipRecord(data) + return s, errors.New("skipping TypeExtendedVlanFlow") + case SFlowTypeExtendedIpv4TunnelEgressFlow: + if record, err := decodeExtendedIpv4TunnelEgress(data); err == nil { + s.Records = append(s.Records, record) + } else { + return s, err + } + case SFlowTypeExtendedIpv4TunnelIngressFlow: + if record, err := decodeExtendedIpv4TunnelIngress(data); err == nil { + s.Records = append(s.Records, record) + } else { + return s, err + } + case SFlowTypeExtendedIpv6TunnelEgressFlow: + if record, err := decodeExtendedIpv6TunnelEgress(data); err == nil { + s.Records = append(s.Records, record) + } else { + return s, err + } + case SFlowTypeExtendedIpv6TunnelIngressFlow: + if record, err := decodeExtendedIpv6TunnelIngress(data); err == nil { + s.Records = append(s.Records, record) + } else { + return s, err + } + case SFlowTypeExtendedDecapsulateEgressFlow: + if record, err := decodeExtendedDecapsulateEgress(data); err == nil { + s.Records = append(s.Records, record) + } else { + return s, err + } + case SFlowTypeExtendedDecapsulateIngressFlow: + if record, err := decodeExtendedDecapsulateIngress(data); err == nil { + s.Records = 
append(s.Records, record) + } else { + return s, err + } + case SFlowTypeExtendedVniEgressFlow: + if record, err := decodeExtendedVniEgress(data); err == nil { + s.Records = append(s.Records, record) + } else { + return s, err + } + case SFlowTypeExtendedVniIngressFlow: + if record, err := decodeExtendedVniIngress(data); err == nil { + s.Records = append(s.Records, record) + } else { + return s, err + } + default: + return s, fmt.Errorf("Unsupported flow record type: %d", flowRecordType) + } + } else { + skipRecord(data) + } + } + return s, nil +} + +// Counter samples report information about various counter +// objects. Typically these are items like IfInOctets, or +// CPU / Memory stats, etc. SFlow will report these at regular +// intervals as configured on the agent. If one were sufficiently +// industrious, this could be used to replace the typical +// SNMP polling used for such things. +type SFlowCounterSample struct { + EnterpriseID SFlowEnterpriseID + Format SFlowSampleType + SampleLength uint32 + SequenceNumber uint32 + SourceIDClass SFlowSourceFormat + SourceIDIndex SFlowSourceValue + RecordCount uint32 + Records []SFlowRecord +} + +// Counter samples have the following structure: + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | int sample sequence number | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// |id type | src id index value | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | int number of records | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// / counter records / +// / / +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + +type SFlowCounterDataFormat uint32 + +func (cdf SFlowCounterDataFormat) decode() (SFlowEnterpriseID, SFlowCounterRecordType) { + leftField := cdf >> 12 + rightField := uint32(0xFFF) & uint32(cdf) + return SFlowEnterpriseID(leftField), SFlowCounterRecordType(rightField) +} + +// GetRecords will return a slice of interface types +// representing records. A type switch can be used to +// get at the underlying SFlowCounterRecordType. +func (cs SFlowCounterSample) GetRecords() []SFlowRecord { + return cs.Records +} + +// GetType will report the type of sample. 
Only the +// compact form of counter samples is supported +func (cs SFlowCounterSample) GetType() SFlowSampleType { + return SFlowTypeCounterSample +} + +type SFlowCounterRecordType uint32 + +const ( + SFlowTypeGenericInterfaceCounters SFlowCounterRecordType = 1 + SFlowTypeEthernetInterfaceCounters SFlowCounterRecordType = 2 + SFlowTypeTokenRingInterfaceCounters SFlowCounterRecordType = 3 + SFlowType100BaseVGInterfaceCounters SFlowCounterRecordType = 4 + SFlowTypeVLANCounters SFlowCounterRecordType = 5 + SFlowTypeLACPCounters SFlowCounterRecordType = 7 + SFlowTypeProcessorCounters SFlowCounterRecordType = 1001 + SFlowTypeOpenflowPortCounters SFlowCounterRecordType = 1004 + SFlowTypePORTNAMECounters SFlowCounterRecordType = 1005 + SFLowTypeAPPRESOURCESCounters SFlowCounterRecordType = 2203 + SFlowTypeOVSDPCounters SFlowCounterRecordType = 2207 +) + +func (cr SFlowCounterRecordType) String() string { + switch cr { + case SFlowTypeGenericInterfaceCounters: + return "Generic Interface Counters" + case SFlowTypeEthernetInterfaceCounters: + return "Ethernet Interface Counters" + case SFlowTypeTokenRingInterfaceCounters: + return "Token Ring Interface Counters" + case SFlowType100BaseVGInterfaceCounters: + return "100BaseVG Interface Counters" + case SFlowTypeVLANCounters: + return "VLAN Counters" + case SFlowTypeLACPCounters: + return "LACP Counters" + case SFlowTypeProcessorCounters: + return "Processor Counters" + case SFlowTypeOpenflowPortCounters: + return "Openflow Port Counters" + case SFlowTypePORTNAMECounters: + return "PORT NAME Counters" + case SFLowTypeAPPRESOURCESCounters: + return "App Resources Counters" + case SFlowTypeOVSDPCounters: + return "OVSDP Counters" + default: + return "" + + } +} + +func decodeCounterSample(data *[]byte, expanded bool) (SFlowCounterSample, error) { + s := SFlowCounterSample{} + var sdc SFlowDataSource + var sdce SFlowDataSourceExpanded + var sdf SFlowDataFormat + + *data, sdf = (*data)[4:], SFlowDataFormat(binary.BigEndian.Uint32((*data)[:4])) + s.EnterpriseID, s.Format = sdf.decode() + *data, s.SampleLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, s.SequenceNumber = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + if expanded { + *data, sdce = (*data)[8:], SFlowDataSourceExpanded{SFlowSourceFormat(binary.BigEndian.Uint32((*data)[:4])), SFlowSourceValue(binary.BigEndian.Uint32((*data)[4:8]))} + s.SourceIDClass, s.SourceIDIndex = sdce.decode() + } else { + *data, sdc = (*data)[4:], SFlowDataSource(binary.BigEndian.Uint32((*data)[:4])) + s.SourceIDClass, s.SourceIDIndex = sdc.decode() + } + *data, s.RecordCount = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + + for i := uint32(0); i < s.RecordCount; i++ { + cdf := SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4])) + _, counterRecordType := cdf.decode() + switch counterRecordType { + case SFlowTypeGenericInterfaceCounters: + if record, err := decodeGenericInterfaceCounters(data); err == nil { + s.Records = append(s.Records, record) + } else { + return s, err + } + case SFlowTypeEthernetInterfaceCounters: + if record, err := decodeEthernetCounters(data); err == nil { + s.Records = append(s.Records, record) + } else { + return s, err + } + case SFlowTypeTokenRingInterfaceCounters: + skipRecord(data) + return s, errors.New("skipping TypeTokenRingInterfaceCounters") + case SFlowType100BaseVGInterfaceCounters: + skipRecord(data) + return s, errors.New("skipping Type100BaseVGInterfaceCounters") + case SFlowTypeVLANCounters: + if record, err := decodeVLANCounters(data); err 
== nil { + s.Records = append(s.Records, record) + } else { + return s, err + } + case SFlowTypeLACPCounters: + if record, err := decodeLACPCounters(data); err == nil { + s.Records = append(s.Records, record) + } else { + return s, err + } + case SFlowTypeProcessorCounters: + if record, err := decodeProcessorCounters(data); err == nil { + s.Records = append(s.Records, record) + } else { + return s, err + } + case SFlowTypeOpenflowPortCounters: + if record, err := decodeOpenflowportCounters(data); err == nil { + s.Records = append(s.Records, record) + } else { + return s, err + } + case SFlowTypePORTNAMECounters: + if record, err := decodePortnameCounters(data); err == nil { + s.Records = append(s.Records, record) + } else { + return s, err + } + case SFLowTypeAPPRESOURCESCounters: + if record, err := decodeAppresourcesCounters(data); err == nil { + s.Records = append(s.Records, record) + } else { + return s, err + } + case SFlowTypeOVSDPCounters: + if record, err := decodeOVSDPCounters(data); err == nil { + s.Records = append(s.Records, record) + } else { + return s, err + } + default: + return s, fmt.Errorf("Invalid counter record type: %d", counterRecordType) + } + } + return s, nil +} + +// SFlowBaseFlowRecord holds the fields common to all records +// of type SFlowFlowRecordType +type SFlowBaseFlowRecord struct { + EnterpriseID SFlowEnterpriseID + Format SFlowFlowRecordType + FlowDataLength uint32 +} + +func (bfr SFlowBaseFlowRecord) GetType() SFlowFlowRecordType { + return bfr.Format +} + +// SFlowFlowRecordType denotes what kind of Flow Record is +// represented. See RFC 3176 +type SFlowFlowRecordType uint32 + +const ( + SFlowTypeRawPacketFlow SFlowFlowRecordType = 1 + SFlowTypeEthernetFrameFlow SFlowFlowRecordType = 2 + SFlowTypeIpv4Flow SFlowFlowRecordType = 3 + SFlowTypeIpv6Flow SFlowFlowRecordType = 4 + SFlowTypeExtendedSwitchFlow SFlowFlowRecordType = 1001 + SFlowTypeExtendedRouterFlow SFlowFlowRecordType = 1002 + SFlowTypeExtendedGatewayFlow SFlowFlowRecordType = 1003 + SFlowTypeExtendedUserFlow SFlowFlowRecordType = 1004 + SFlowTypeExtendedUrlFlow SFlowFlowRecordType = 1005 + SFlowTypeExtendedMlpsFlow SFlowFlowRecordType = 1006 + SFlowTypeExtendedNatFlow SFlowFlowRecordType = 1007 + SFlowTypeExtendedMlpsTunnelFlow SFlowFlowRecordType = 1008 + SFlowTypeExtendedMlpsVcFlow SFlowFlowRecordType = 1009 + SFlowTypeExtendedMlpsFecFlow SFlowFlowRecordType = 1010 + SFlowTypeExtendedMlpsLvpFecFlow SFlowFlowRecordType = 1011 + SFlowTypeExtendedVlanFlow SFlowFlowRecordType = 1012 + SFlowTypeExtendedIpv4TunnelEgressFlow SFlowFlowRecordType = 1023 + SFlowTypeExtendedIpv4TunnelIngressFlow SFlowFlowRecordType = 1024 + SFlowTypeExtendedIpv6TunnelEgressFlow SFlowFlowRecordType = 1025 + SFlowTypeExtendedIpv6TunnelIngressFlow SFlowFlowRecordType = 1026 + SFlowTypeExtendedDecapsulateEgressFlow SFlowFlowRecordType = 1027 + SFlowTypeExtendedDecapsulateIngressFlow SFlowFlowRecordType = 1028 + SFlowTypeExtendedVniEgressFlow SFlowFlowRecordType = 1029 + SFlowTypeExtendedVniIngressFlow SFlowFlowRecordType = 1030 +) + +func (rt SFlowFlowRecordType) String() string { + switch rt { + case SFlowTypeRawPacketFlow: + return "Raw Packet Flow Record" + case SFlowTypeEthernetFrameFlow: + return "Ethernet Frame Flow Record" + case SFlowTypeIpv4Flow: + return "IPv4 Flow Record" + case SFlowTypeIpv6Flow: + return "IPv6 Flow Record" + case SFlowTypeExtendedSwitchFlow: + return "Extended Switch Flow Record" + case SFlowTypeExtendedRouterFlow: + return "Extended Router Flow Record" + case SFlowTypeExtendedGatewayFlow: 
+ return "Extended Gateway Flow Record" + case SFlowTypeExtendedUserFlow: + return "Extended User Flow Record" + case SFlowTypeExtendedUrlFlow: + return "Extended URL Flow Record" + case SFlowTypeExtendedMlpsFlow: + return "Extended MPLS Flow Record" + case SFlowTypeExtendedNatFlow: + return "Extended NAT Flow Record" + case SFlowTypeExtendedMlpsTunnelFlow: + return "Extended MPLS Tunnel Flow Record" + case SFlowTypeExtendedMlpsVcFlow: + return "Extended MPLS VC Flow Record" + case SFlowTypeExtendedMlpsFecFlow: + return "Extended MPLS FEC Flow Record" + case SFlowTypeExtendedMlpsLvpFecFlow: + return "Extended MPLS LVP FEC Flow Record" + case SFlowTypeExtendedVlanFlow: + return "Extended VLAN Flow Record" + case SFlowTypeExtendedIpv4TunnelEgressFlow: + return "Extended IPv4 Tunnel Egress Record" + case SFlowTypeExtendedIpv4TunnelIngressFlow: + return "Extended IPv4 Tunnel Ingress Record" + case SFlowTypeExtendedIpv6TunnelEgressFlow: + return "Extended IPv6 Tunnel Egress Record" + case SFlowTypeExtendedIpv6TunnelIngressFlow: + return "Extended IPv6 Tunnel Ingress Record" + case SFlowTypeExtendedDecapsulateEgressFlow: + return "Extended Decapsulate Egress Record" + case SFlowTypeExtendedDecapsulateIngressFlow: + return "Extended Decapsulate Ingress Record" + case SFlowTypeExtendedVniEgressFlow: + return "Extended VNI Ingress Record" + case SFlowTypeExtendedVniIngressFlow: + return "Extended VNI Ingress Record" + default: + return "" + } +} + +// SFlowRawPacketFlowRecords hold information about a sampled +// packet grabbed as it transited the agent. This is +// perhaps the most useful and interesting record type, +// as it holds the headers of the sampled packet and +// can be used to build up a complete picture of the +// traffic patterns on a network. +// +// The raw packet header is sent back into gopacket for +// decoding, and the resulting gopackt.Packet is stored +// in the Header member +type SFlowRawPacketFlowRecord struct { + SFlowBaseFlowRecord + HeaderProtocol SFlowRawHeaderProtocol + FrameLength uint32 + PayloadRemoved uint32 + HeaderLength uint32 + Header gopacket.Packet +} + +// Raw packet record types have the following structure: + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | 20 bit Interprise (0) |12 bit format | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | record length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Header Protocol | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Frame Length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Payload Removed | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Header Length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// \ Header \ +// \ \ +// \ \ +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + +type SFlowRawHeaderProtocol uint32 + +const ( + SFlowProtoEthernet SFlowRawHeaderProtocol = 1 + SFlowProtoISO88024 SFlowRawHeaderProtocol = 2 + SFlowProtoISO88025 SFlowRawHeaderProtocol = 3 + SFlowProtoFDDI SFlowRawHeaderProtocol = 4 + SFlowProtoFrameRelay SFlowRawHeaderProtocol = 5 + SFlowProtoX25 SFlowRawHeaderProtocol = 6 + SFlowProtoPPP SFlowRawHeaderProtocol = 7 + SFlowProtoSMDS SFlowRawHeaderProtocol = 8 + SFlowProtoAAL5 SFlowRawHeaderProtocol = 9 + SFlowProtoAAL5_IP SFlowRawHeaderProtocol = 10 /* e.g. 
Cisco AAL5 mux */ + SFlowProtoIPv4 SFlowRawHeaderProtocol = 11 + SFlowProtoIPv6 SFlowRawHeaderProtocol = 12 + SFlowProtoMPLS SFlowRawHeaderProtocol = 13 + SFlowProtoPOS SFlowRawHeaderProtocol = 14 /* RFC 1662, 2615 */ +) + +func (sfhp SFlowRawHeaderProtocol) String() string { + switch sfhp { + case SFlowProtoEthernet: + return "ETHERNET-ISO88023" + case SFlowProtoISO88024: + return "ISO88024-TOKENBUS" + case SFlowProtoISO88025: + return "ISO88025-TOKENRING" + case SFlowProtoFDDI: + return "FDDI" + case SFlowProtoFrameRelay: + return "FRAME-RELAY" + case SFlowProtoX25: + return "X25" + case SFlowProtoPPP: + return "PPP" + case SFlowProtoSMDS: + return "SMDS" + case SFlowProtoAAL5: + return "AAL5" + case SFlowProtoAAL5_IP: + return "AAL5-IP" + case SFlowProtoIPv4: + return "IPv4" + case SFlowProtoIPv6: + return "IPv6" + case SFlowProtoMPLS: + return "MPLS" + case SFlowProtoPOS: + return "POS" + } + return "UNKNOWN" +} + +func decodeRawPacketFlowRecord(data *[]byte) (SFlowRawPacketFlowRecord, error) { + rec := SFlowRawPacketFlowRecord{} + header := []byte{} + var fdf SFlowFlowDataFormat + + *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4])) + rec.EnterpriseID, rec.Format = fdf.decode() + *data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, rec.HeaderProtocol = (*data)[4:], SFlowRawHeaderProtocol(binary.BigEndian.Uint32((*data)[:4])) + *data, rec.FrameLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, rec.PayloadRemoved = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, rec.HeaderLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + headerLenWithPadding := int(rec.HeaderLength + ((4 - rec.HeaderLength) % 4)) + *data, header = (*data)[headerLenWithPadding:], (*data)[:headerLenWithPadding] + rec.Header = gopacket.NewPacket(header, LayerTypeEthernet, gopacket.Default) + return rec, nil +} + +// SFlowExtendedSwitchFlowRecord give additional information +// about the sampled packet if it's available. It's mainly +// useful for getting at the incoming and outgoing VLANs +// An agent may or may not provide this information. 
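// The IncomingVLANPriority and OutgoingVLANPriority fields below carry the
// 802.1p priority code point (0-7) of the sampled frame, when the exporting
// agent supplies it.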
+type SFlowExtendedSwitchFlowRecord struct { + SFlowBaseFlowRecord + IncomingVLAN uint32 + IncomingVLANPriority uint32 + OutgoingVLAN uint32 + OutgoingVLANPriority uint32 +} + +// Extended switch records have the following structure: + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | 20 bit Interprise (0) |12 bit format | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | record length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Incoming VLAN | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Incoming VLAN Priority | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Outgoing VLAN | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Outgoing VLAN Priority | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + +func decodeExtendedSwitchFlowRecord(data *[]byte) (SFlowExtendedSwitchFlowRecord, error) { + es := SFlowExtendedSwitchFlowRecord{} + var fdf SFlowFlowDataFormat + + *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4])) + es.EnterpriseID, es.Format = fdf.decode() + *data, es.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, es.IncomingVLAN = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, es.IncomingVLANPriority = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, es.OutgoingVLAN = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, es.OutgoingVLANPriority = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + return es, nil +} + +// SFlowExtendedRouterFlowRecord gives additional information +// about the layer 3 routing information used to forward +// the packet +type SFlowExtendedRouterFlowRecord struct { + SFlowBaseFlowRecord + NextHop net.IP + NextHopSourceMask uint32 + NextHopDestinationMask uint32 +} + +// Extended router records have the following structure: + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | 20 bit Interprise (0) |12 bit format | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | record length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | IP version of next hop router (1=v4|2=v6) | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// / Next Hop address (v4=4byte|v6=16byte) / +// / / +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Next Hop Source Mask | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Next Hop Destination Mask | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + +func decodeExtendedRouterFlowRecord(data *[]byte) (SFlowExtendedRouterFlowRecord, error) { + er := SFlowExtendedRouterFlowRecord{} + var fdf SFlowFlowDataFormat + var extendedRouterAddressType SFlowIPType + + *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4])) + er.EnterpriseID, er.Format = fdf.decode() + *data, er.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, extendedRouterAddressType = (*data)[4:], SFlowIPType(binary.BigEndian.Uint32((*data)[:4])) + *data, er.NextHop = (*data)[extendedRouterAddressType.Length():], (*data)[:extendedRouterAddressType.Length()] + *data, er.NextHopSourceMask = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, er.NextHopDestinationMask = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + return er, nil +} + +// SFlowExtendedGatewayFlowRecord describes information treasured by +// nework engineers everywhere: AS path information listing which +// BGP peer sent the packet, and various other BGP related info. 
+// This information is vital because it gives a picture of how much +// traffic is being sent from / received by various BGP peers. + +// Extended gateway records have the following structure: + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | 20 bit Interprise (0) |12 bit format | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | record length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | IP version of next hop router (1=v4|2=v6) | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// / Next Hop address (v4=4byte|v6=16byte) / +// / / +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | AS | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Source AS | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Peer AS | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | AS Path Count | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// / AS Path / Sequence / +// / / +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// / Communities / +// / / +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Local Pref | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + +// AS Path / Sequence: + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | AS Source Type (Path=1 / Sequence=2) | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Path / Sequence length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// / Path / Sequence Members / +// / / +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + +// Communities: + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | communitiy length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// / communitiy Members / +// / / +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + +type SFlowExtendedGatewayFlowRecord struct { + SFlowBaseFlowRecord + NextHop net.IP + AS uint32 + SourceAS uint32 + PeerAS uint32 + ASPathCount uint32 + ASPath []SFlowASDestination + Communities []uint32 + LocalPref uint32 +} + +type SFlowASPathType uint32 + +const ( + SFlowASSet SFlowASPathType = 1 + SFlowASSequence SFlowASPathType = 2 +) + +func (apt SFlowASPathType) String() string { + switch apt { + case SFlowASSet: + return "AS Set" + case SFlowASSequence: + return "AS Sequence" + default: + return "" + } +} + +type SFlowASDestination struct { + Type SFlowASPathType + Count uint32 + Members []uint32 +} + +func (asd SFlowASDestination) String() string { + switch asd.Type { + case SFlowASSet: + return fmt.Sprint("AS Set:", asd.Members) + case SFlowASSequence: + return fmt.Sprint("AS Sequence:", asd.Members) + default: + return "" + } +} + +func (ad *SFlowASDestination) decodePath(data *[]byte) { + *data, ad.Type = (*data)[4:], SFlowASPathType(binary.BigEndian.Uint32((*data)[:4])) + *data, ad.Count = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + ad.Members = make([]uint32, ad.Count) + for i := uint32(0); i < ad.Count; i++ { + var member uint32 + *data, member = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + ad.Members[i] = member + } +} + +func decodeExtendedGatewayFlowRecord(data *[]byte) (SFlowExtendedGatewayFlowRecord, error) { + eg := SFlowExtendedGatewayFlowRecord{} + var fdf SFlowFlowDataFormat + var extendedGatewayAddressType SFlowIPType + var communitiesLength uint32 + var community uint32 + + *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4])) + eg.EnterpriseID, eg.Format = fdf.decode() + *data, eg.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) 
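// The fields that follow are variable-length: the address-type word read
// next selects a 4-byte (IPv4) or 16-byte (IPv6) next-hop value via
// SFlowIPType.Length(), the AS path is a counted list of SFlowASDestination
// entries (each itself a counted list of 32-bit AS numbers), and the
// communities are a counted list of 32-bit values, so the slice is advanced
// by a data-dependent amount below.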
+ *data, extendedGatewayAddressType = (*data)[4:], SFlowIPType(binary.BigEndian.Uint32((*data)[:4])) + *data, eg.NextHop = (*data)[extendedGatewayAddressType.Length():], (*data)[:extendedGatewayAddressType.Length()] + *data, eg.AS = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, eg.SourceAS = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, eg.PeerAS = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, eg.ASPathCount = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + for i := uint32(0); i < eg.ASPathCount; i++ { + asPath := SFlowASDestination{} + asPath.decodePath(data) + eg.ASPath = append(eg.ASPath, asPath) + } + *data, communitiesLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + eg.Communities = make([]uint32, communitiesLength) + for j := uint32(0); j < communitiesLength; j++ { + *data, community = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + eg.Communities[j] = community + } + *data, eg.LocalPref = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + return eg, nil +} + +// ************************************************** +// Extended URL Flow Record +// ************************************************** + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | 20 bit Interprise (0) |12 bit format | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | record length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | direction | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | URL | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Host | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + +type SFlowURLDirection uint32 + +const ( + SFlowURLsrc SFlowURLDirection = 1 + SFlowURLdst SFlowURLDirection = 2 +) + +func (urld SFlowURLDirection) String() string { + switch urld { + case SFlowURLsrc: + return "Source address is the server" + case SFlowURLdst: + return "Destination address is the server" + default: + return "" + } +} + +type SFlowExtendedURLRecord struct { + SFlowBaseFlowRecord + Direction SFlowURLDirection + URL string + Host string +} + +func decodeExtendedURLRecord(data *[]byte) (SFlowExtendedURLRecord, error) { + eur := SFlowExtendedURLRecord{} + var fdf SFlowFlowDataFormat + var urlLen uint32 + var urlLenWithPad int + var hostLen uint32 + var hostLenWithPad int + var urlBytes []byte + var hostBytes []byte + + *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4])) + eur.EnterpriseID, eur.Format = fdf.decode() + *data, eur.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, eur.Direction = (*data)[4:], SFlowURLDirection(binary.BigEndian.Uint32((*data)[:4])) + *data, urlLen = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + urlLenWithPad = int(urlLen + ((4 - urlLen) % 4)) + *data, urlBytes = (*data)[urlLenWithPad:], (*data)[:urlLenWithPad] + eur.URL = string(urlBytes[:urlLen]) + *data, hostLen = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + hostLenWithPad = int(hostLen + ((4 - hostLen) % 4)) + *data, hostBytes = (*data)[hostLenWithPad:], (*data)[:hostLenWithPad] + eur.Host = string(hostBytes[:hostLen]) + return eur, nil +} + +// ************************************************** +// Extended User Flow Record +// ************************************************** + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | 20 bit Interprise (0) |12 bit format | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | record length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | 
Source Character Set | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Source User Id | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Destination Character Set | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Destination User ID | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + +type SFlowExtendedUserFlow struct { + SFlowBaseFlowRecord + SourceCharSet SFlowCharSet + SourceUserID string + DestinationCharSet SFlowCharSet + DestinationUserID string +} + +type SFlowCharSet uint32 + +const ( + SFlowCSunknown SFlowCharSet = 2 + SFlowCSASCII SFlowCharSet = 3 + SFlowCSISOLatin1 SFlowCharSet = 4 + SFlowCSISOLatin2 SFlowCharSet = 5 + SFlowCSISOLatin3 SFlowCharSet = 6 + SFlowCSISOLatin4 SFlowCharSet = 7 + SFlowCSISOLatinCyrillic SFlowCharSet = 8 + SFlowCSISOLatinArabic SFlowCharSet = 9 + SFlowCSISOLatinGreek SFlowCharSet = 10 + SFlowCSISOLatinHebrew SFlowCharSet = 11 + SFlowCSISOLatin5 SFlowCharSet = 12 + SFlowCSISOLatin6 SFlowCharSet = 13 + SFlowCSISOTextComm SFlowCharSet = 14 + SFlowCSHalfWidthKatakana SFlowCharSet = 15 + SFlowCSJISEncoding SFlowCharSet = 16 + SFlowCSShiftJIS SFlowCharSet = 17 + SFlowCSEUCPkdFmtJapanese SFlowCharSet = 18 + SFlowCSEUCFixWidJapanese SFlowCharSet = 19 + SFlowCSISO4UnitedKingdom SFlowCharSet = 20 + SFlowCSISO11SwedishForNames SFlowCharSet = 21 + SFlowCSISO15Italian SFlowCharSet = 22 + SFlowCSISO17Spanish SFlowCharSet = 23 + SFlowCSISO21German SFlowCharSet = 24 + SFlowCSISO60DanishNorwegian SFlowCharSet = 25 + SFlowCSISO69French SFlowCharSet = 26 + SFlowCSISO10646UTF1 SFlowCharSet = 27 + SFlowCSISO646basic1983 SFlowCharSet = 28 + SFlowCSINVARIANT SFlowCharSet = 29 + SFlowCSISO2IntlRefVersion SFlowCharSet = 30 + SFlowCSNATSSEFI SFlowCharSet = 31 + SFlowCSNATSSEFIADD SFlowCharSet = 32 + SFlowCSNATSDANO SFlowCharSet = 33 + SFlowCSNATSDANOADD SFlowCharSet = 34 + SFlowCSISO10Swedish SFlowCharSet = 35 + SFlowCSKSC56011987 SFlowCharSet = 36 + SFlowCSISO2022KR SFlowCharSet = 37 + SFlowCSEUCKR SFlowCharSet = 38 + SFlowCSISO2022JP SFlowCharSet = 39 + SFlowCSISO2022JP2 SFlowCharSet = 40 + SFlowCSISO13JISC6220jp SFlowCharSet = 41 + SFlowCSISO14JISC6220ro SFlowCharSet = 42 + SFlowCSISO16Portuguese SFlowCharSet = 43 + SFlowCSISO18Greek7Old SFlowCharSet = 44 + SFlowCSISO19LatinGreek SFlowCharSet = 45 + SFlowCSISO25French SFlowCharSet = 46 + SFlowCSISO27LatinGreek1 SFlowCharSet = 47 + SFlowCSISO5427Cyrillic SFlowCharSet = 48 + SFlowCSISO42JISC62261978 SFlowCharSet = 49 + SFlowCSISO47BSViewdata SFlowCharSet = 50 + SFlowCSISO49INIS SFlowCharSet = 51 + SFlowCSISO50INIS8 SFlowCharSet = 52 + SFlowCSISO51INISCyrillic SFlowCharSet = 53 + SFlowCSISO54271981 SFlowCharSet = 54 + SFlowCSISO5428Greek SFlowCharSet = 55 + SFlowCSISO57GB1988 SFlowCharSet = 56 + SFlowCSISO58GB231280 SFlowCharSet = 57 + SFlowCSISO61Norwegian2 SFlowCharSet = 58 + SFlowCSISO70VideotexSupp1 SFlowCharSet = 59 + SFlowCSISO84Portuguese2 SFlowCharSet = 60 + SFlowCSISO85Spanish2 SFlowCharSet = 61 + SFlowCSISO86Hungarian SFlowCharSet = 62 + SFlowCSISO87JISX0208 SFlowCharSet = 63 + SFlowCSISO88Greek7 SFlowCharSet = 64 + SFlowCSISO89ASMO449 SFlowCharSet = 65 + SFlowCSISO90 SFlowCharSet = 66 + SFlowCSISO91JISC62291984a SFlowCharSet = 67 + SFlowCSISO92JISC62991984b SFlowCharSet = 68 + SFlowCSISO93JIS62291984badd SFlowCharSet = 69 + SFlowCSISO94JIS62291984hand SFlowCharSet = 70 + SFlowCSISO95JIS62291984handadd SFlowCharSet = 71 + SFlowCSISO96JISC62291984kana SFlowCharSet = 72 + SFlowCSISO2033 SFlowCharSet = 73 + SFlowCSISO99NAPLPS SFlowCharSet = 74 + SFlowCSISO102T617bit SFlowCharSet = 
75 + SFlowCSISO103T618bit SFlowCharSet = 76 + SFlowCSISO111ECMACyrillic SFlowCharSet = 77 + SFlowCSa71 SFlowCharSet = 78 + SFlowCSa72 SFlowCharSet = 79 + SFlowCSISO123CSAZ24341985gr SFlowCharSet = 80 + SFlowCSISO88596E SFlowCharSet = 81 + SFlowCSISO88596I SFlowCharSet = 82 + SFlowCSISO128T101G2 SFlowCharSet = 83 + SFlowCSISO88598E SFlowCharSet = 84 + SFlowCSISO88598I SFlowCharSet = 85 + SFlowCSISO139CSN369103 SFlowCharSet = 86 + SFlowCSISO141JUSIB1002 SFlowCharSet = 87 + SFlowCSISO143IECP271 SFlowCharSet = 88 + SFlowCSISO146Serbian SFlowCharSet = 89 + SFlowCSISO147Macedonian SFlowCharSet = 90 + SFlowCSISO150 SFlowCharSet = 91 + SFlowCSISO151Cuba SFlowCharSet = 92 + SFlowCSISO6937Add SFlowCharSet = 93 + SFlowCSISO153GOST1976874 SFlowCharSet = 94 + SFlowCSISO8859Supp SFlowCharSet = 95 + SFlowCSISO10367Box SFlowCharSet = 96 + SFlowCSISO158Lap SFlowCharSet = 97 + SFlowCSISO159JISX02121990 SFlowCharSet = 98 + SFlowCSISO646Danish SFlowCharSet = 99 + SFlowCSUSDK SFlowCharSet = 100 + SFlowCSDKUS SFlowCharSet = 101 + SFlowCSKSC5636 SFlowCharSet = 102 + SFlowCSUnicode11UTF7 SFlowCharSet = 103 + SFlowCSISO2022CN SFlowCharSet = 104 + SFlowCSISO2022CNEXT SFlowCharSet = 105 + SFlowCSUTF8 SFlowCharSet = 106 + SFlowCSISO885913 SFlowCharSet = 109 + SFlowCSISO885914 SFlowCharSet = 110 + SFlowCSISO885915 SFlowCharSet = 111 + SFlowCSISO885916 SFlowCharSet = 112 + SFlowCSGBK SFlowCharSet = 113 + SFlowCSGB18030 SFlowCharSet = 114 + SFlowCSOSDEBCDICDF0415 SFlowCharSet = 115 + SFlowCSOSDEBCDICDF03IRV SFlowCharSet = 116 + SFlowCSOSDEBCDICDF041 SFlowCharSet = 117 + SFlowCSISO115481 SFlowCharSet = 118 + SFlowCSKZ1048 SFlowCharSet = 119 + SFlowCSUnicode SFlowCharSet = 1000 + SFlowCSUCS4 SFlowCharSet = 1001 + SFlowCSUnicodeASCII SFlowCharSet = 1002 + SFlowCSUnicodeLatin1 SFlowCharSet = 1003 + SFlowCSUnicodeJapanese SFlowCharSet = 1004 + SFlowCSUnicodeIBM1261 SFlowCharSet = 1005 + SFlowCSUnicodeIBM1268 SFlowCharSet = 1006 + SFlowCSUnicodeIBM1276 SFlowCharSet = 1007 + SFlowCSUnicodeIBM1264 SFlowCharSet = 1008 + SFlowCSUnicodeIBM1265 SFlowCharSet = 1009 + SFlowCSUnicode11 SFlowCharSet = 1010 + SFlowCSSCSU SFlowCharSet = 1011 + SFlowCSUTF7 SFlowCharSet = 1012 + SFlowCSUTF16BE SFlowCharSet = 1013 + SFlowCSUTF16LE SFlowCharSet = 1014 + SFlowCSUTF16 SFlowCharSet = 1015 + SFlowCSCESU8 SFlowCharSet = 1016 + SFlowCSUTF32 SFlowCharSet = 1017 + SFlowCSUTF32BE SFlowCharSet = 1018 + SFlowCSUTF32LE SFlowCharSet = 1019 + SFlowCSBOCU1 SFlowCharSet = 1020 + SFlowCSWindows30Latin1 SFlowCharSet = 2000 + SFlowCSWindows31Latin1 SFlowCharSet = 2001 + SFlowCSWindows31Latin2 SFlowCharSet = 2002 + SFlowCSWindows31Latin5 SFlowCharSet = 2003 + SFlowCSHPRoman8 SFlowCharSet = 2004 + SFlowCSAdobeStandardEncoding SFlowCharSet = 2005 + SFlowCSVenturaUS SFlowCharSet = 2006 + SFlowCSVenturaInternational SFlowCharSet = 2007 + SFlowCSDECMCS SFlowCharSet = 2008 + SFlowCSPC850Multilingual SFlowCharSet = 2009 + SFlowCSPCp852 SFlowCharSet = 2010 + SFlowCSPC8CodePage437 SFlowCharSet = 2011 + SFlowCSPC8DanishNorwegian SFlowCharSet = 2012 + SFlowCSPC862LatinHebrew SFlowCharSet = 2013 + SFlowCSPC8Turkish SFlowCharSet = 2014 + SFlowCSIBMSymbols SFlowCharSet = 2015 + SFlowCSIBMThai SFlowCharSet = 2016 + SFlowCSHPLegal SFlowCharSet = 2017 + SFlowCSHPPiFont SFlowCharSet = 2018 + SFlowCSHPMath8 SFlowCharSet = 2019 + SFlowCSHPPSMath SFlowCharSet = 2020 + SFlowCSHPDesktop SFlowCharSet = 2021 + SFlowCSVenturaMath SFlowCharSet = 2022 + SFlowCSMicrosoftPublishing SFlowCharSet = 2023 + SFlowCSWindows31J SFlowCharSet = 2024 + SFlowCSGB2312 SFlowCharSet = 2025 + SFlowCSBig5 
SFlowCharSet = 2026 + SFlowCSMacintosh SFlowCharSet = 2027 + SFlowCSIBM037 SFlowCharSet = 2028 + SFlowCSIBM038 SFlowCharSet = 2029 + SFlowCSIBM273 SFlowCharSet = 2030 + SFlowCSIBM274 SFlowCharSet = 2031 + SFlowCSIBM275 SFlowCharSet = 2032 + SFlowCSIBM277 SFlowCharSet = 2033 + SFlowCSIBM278 SFlowCharSet = 2034 + SFlowCSIBM280 SFlowCharSet = 2035 + SFlowCSIBM281 SFlowCharSet = 2036 + SFlowCSIBM284 SFlowCharSet = 2037 + SFlowCSIBM285 SFlowCharSet = 2038 + SFlowCSIBM290 SFlowCharSet = 2039 + SFlowCSIBM297 SFlowCharSet = 2040 + SFlowCSIBM420 SFlowCharSet = 2041 + SFlowCSIBM423 SFlowCharSet = 2042 + SFlowCSIBM424 SFlowCharSet = 2043 + SFlowCSIBM500 SFlowCharSet = 2044 + SFlowCSIBM851 SFlowCharSet = 2045 + SFlowCSIBM855 SFlowCharSet = 2046 + SFlowCSIBM857 SFlowCharSet = 2047 + SFlowCSIBM860 SFlowCharSet = 2048 + SFlowCSIBM861 SFlowCharSet = 2049 + SFlowCSIBM863 SFlowCharSet = 2050 + SFlowCSIBM864 SFlowCharSet = 2051 + SFlowCSIBM865 SFlowCharSet = 2052 + SFlowCSIBM868 SFlowCharSet = 2053 + SFlowCSIBM869 SFlowCharSet = 2054 + SFlowCSIBM870 SFlowCharSet = 2055 + SFlowCSIBM871 SFlowCharSet = 2056 + SFlowCSIBM880 SFlowCharSet = 2057 + SFlowCSIBM891 SFlowCharSet = 2058 + SFlowCSIBM903 SFlowCharSet = 2059 + SFlowCSIBBM904 SFlowCharSet = 2060 + SFlowCSIBM905 SFlowCharSet = 2061 + SFlowCSIBM918 SFlowCharSet = 2062 + SFlowCSIBM1026 SFlowCharSet = 2063 + SFlowCSIBMEBCDICATDE SFlowCharSet = 2064 + SFlowCSEBCDICATDEA SFlowCharSet = 2065 + SFlowCSEBCDICCAFR SFlowCharSet = 2066 + SFlowCSEBCDICDKNO SFlowCharSet = 2067 + SFlowCSEBCDICDKNOA SFlowCharSet = 2068 + SFlowCSEBCDICFISE SFlowCharSet = 2069 + SFlowCSEBCDICFISEA SFlowCharSet = 2070 + SFlowCSEBCDICFR SFlowCharSet = 2071 + SFlowCSEBCDICIT SFlowCharSet = 2072 + SFlowCSEBCDICPT SFlowCharSet = 2073 + SFlowCSEBCDICES SFlowCharSet = 2074 + SFlowCSEBCDICESA SFlowCharSet = 2075 + SFlowCSEBCDICESS SFlowCharSet = 2076 + SFlowCSEBCDICUK SFlowCharSet = 2077 + SFlowCSEBCDICUS SFlowCharSet = 2078 + SFlowCSUnknown8BiT SFlowCharSet = 2079 + SFlowCSMnemonic SFlowCharSet = 2080 + SFlowCSMnem SFlowCharSet = 2081 + SFlowCSVISCII SFlowCharSet = 2082 + SFlowCSVIQR SFlowCharSet = 2083 + SFlowCSKOI8R SFlowCharSet = 2084 + SFlowCSHZGB2312 SFlowCharSet = 2085 + SFlowCSIBM866 SFlowCharSet = 2086 + SFlowCSPC775Baltic SFlowCharSet = 2087 + SFlowCSKOI8U SFlowCharSet = 2088 + SFlowCSIBM00858 SFlowCharSet = 2089 + SFlowCSIBM00924 SFlowCharSet = 2090 + SFlowCSIBM01140 SFlowCharSet = 2091 + SFlowCSIBM01141 SFlowCharSet = 2092 + SFlowCSIBM01142 SFlowCharSet = 2093 + SFlowCSIBM01143 SFlowCharSet = 2094 + SFlowCSIBM01144 SFlowCharSet = 2095 + SFlowCSIBM01145 SFlowCharSet = 2096 + SFlowCSIBM01146 SFlowCharSet = 2097 + SFlowCSIBM01147 SFlowCharSet = 2098 + SFlowCSIBM01148 SFlowCharSet = 2099 + SFlowCSIBM01149 SFlowCharSet = 2100 + SFlowCSBig5HKSCS SFlowCharSet = 2101 + SFlowCSIBM1047 SFlowCharSet = 2102 + SFlowCSPTCP154 SFlowCharSet = 2103 + SFlowCSAmiga1251 SFlowCharSet = 2104 + SFlowCSKOI7switched SFlowCharSet = 2105 + SFlowCSBRF SFlowCharSet = 2106 + SFlowCSTSCII SFlowCharSet = 2107 + SFlowCSCP51932 SFlowCharSet = 2108 + SFlowCSWindows874 SFlowCharSet = 2109 + SFlowCSWindows1250 SFlowCharSet = 2250 + SFlowCSWindows1251 SFlowCharSet = 2251 + SFlowCSWindows1252 SFlowCharSet = 2252 + SFlowCSWindows1253 SFlowCharSet = 2253 + SFlowCSWindows1254 SFlowCharSet = 2254 + SFlowCSWindows1255 SFlowCharSet = 2255 + SFlowCSWindows1256 SFlowCharSet = 2256 + SFlowCSWindows1257 SFlowCharSet = 2257 + SFlowCSWindows1258 SFlowCharSet = 2258 + SFlowCSTIS620 SFlowCharSet = 2259 + SFlowCS50220 SFlowCharSet = 2260 + 
SFlowCSreserved SFlowCharSet = 3000 +) + +func decodeExtendedUserFlow(data *[]byte) (SFlowExtendedUserFlow, error) { + eu := SFlowExtendedUserFlow{} + var fdf SFlowFlowDataFormat + var srcUserLen uint32 + var srcUserLenWithPad int + var srcUserBytes []byte + var dstUserLen uint32 + var dstUserLenWithPad int + var dstUserBytes []byte + + *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4])) + eu.EnterpriseID, eu.Format = fdf.decode() + *data, eu.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, eu.SourceCharSet = (*data)[4:], SFlowCharSet(binary.BigEndian.Uint32((*data)[:4])) + *data, srcUserLen = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + srcUserLenWithPad = int(srcUserLen + ((4 - srcUserLen) % 4)) + *data, srcUserBytes = (*data)[srcUserLenWithPad:], (*data)[:srcUserLenWithPad] + eu.SourceUserID = string(srcUserBytes[:srcUserLen]) + *data, eu.DestinationCharSet = (*data)[4:], SFlowCharSet(binary.BigEndian.Uint32((*data)[:4])) + *data, dstUserLen = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + dstUserLenWithPad = int(dstUserLen + ((4 - dstUserLen) % 4)) + *data, dstUserBytes = (*data)[dstUserLenWithPad:], (*data)[:dstUserLenWithPad] + eu.DestinationUserID = string(dstUserBytes[:dstUserLen]) + return eu, nil +} + +// ************************************************** +// Packet IP version 4 Record +// ************************************************** + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Protocol | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Source IPv4 | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Destination IPv4 | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Source Port | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Destionation Port | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | TCP Flags | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | TOS | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +type SFlowIpv4Record struct { + // The length of the IP packet excluding ower layer encapsulations + Length uint32 + // IP Protocol type (for example, TCP = 6, UDP = 17) + Protocol uint32 + // Source IP Address + IPSrc net.IP + // Destination IP Address + IPDst net.IP + // TCP/UDP source port number or equivalent + PortSrc uint32 + // TCP/UDP destination port number or equivalent + PortDst uint32 + // TCP flags + TCPFlags uint32 + // IP type of service + TOS uint32 +} + +func decodeSFlowIpv4Record(data *[]byte) (SFlowIpv4Record, error) { + si := SFlowIpv4Record{} + + *data, si.Length = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, si.Protocol = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, si.IPSrc = (*data)[4:], net.IP((*data)[:4]) + *data, si.IPDst = (*data)[4:], net.IP((*data)[:4]) + *data, si.PortSrc = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, si.PortDst = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, si.TCPFlags = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, si.TOS = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + + return si, nil +} + +// ************************************************** +// Packet IP version 6 Record +// ************************************************** + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Protocol | +// 
+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Source IPv4 | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Destination IPv4 | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Source Port | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Destionation Port | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | TCP Flags | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Priority | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +type SFlowIpv6Record struct { + // The length of the IP packet excluding ower layer encapsulations + Length uint32 + // IP Protocol type (for example, TCP = 6, UDP = 17) + Protocol uint32 + // Source IP Address + IPSrc net.IP + // Destination IP Address + IPDst net.IP + // TCP/UDP source port number or equivalent + PortSrc uint32 + // TCP/UDP destination port number or equivalent + PortDst uint32 + // TCP flags + TCPFlags uint32 + // IP priority + Priority uint32 +} + +func decodeSFlowIpv6Record(data *[]byte) (SFlowIpv6Record, error) { + si := SFlowIpv6Record{} + + *data, si.Length = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, si.Protocol = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, si.IPSrc = (*data)[16:], net.IP((*data)[:16]) + *data, si.IPDst = (*data)[16:], net.IP((*data)[:16]) + *data, si.PortSrc = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, si.PortDst = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, si.TCPFlags = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, si.Priority = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + + return si, nil +} + +// ************************************************** +// Extended IPv4 Tunnel Egress +// ************************************************** + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | 20 bit Interprise (0) |12 bit format | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | record length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// / Packet IP version 4 Record / +// / / +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +type SFlowExtendedIpv4TunnelEgressRecord struct { + SFlowBaseFlowRecord + SFlowIpv4Record SFlowIpv4Record +} + +func decodeExtendedIpv4TunnelEgress(data *[]byte) (SFlowExtendedIpv4TunnelEgressRecord, error) { + rec := SFlowExtendedIpv4TunnelEgressRecord{} + var fdf SFlowFlowDataFormat + + *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4])) + rec.EnterpriseID, rec.Format = fdf.decode() + *data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + rec.SFlowIpv4Record, _ = decodeSFlowIpv4Record(data) + + return rec, nil +} + +// ************************************************** +// Extended IPv4 Tunnel Ingress +// ************************************************** + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | 20 bit Interprise (0) |12 bit format | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | record length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// / Packet IP version 4 Record / +// / / +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +type SFlowExtendedIpv4TunnelIngressRecord struct { + SFlowBaseFlowRecord + SFlowIpv4Record SFlowIpv4Record +} + +func decodeExtendedIpv4TunnelIngress(data *[]byte) (SFlowExtendedIpv4TunnelIngressRecord, error) { + rec := SFlowExtendedIpv4TunnelIngressRecord{} + var fdf SFlowFlowDataFormat + + *data, fdf = (*data)[4:], 
SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4])) + rec.EnterpriseID, rec.Format = fdf.decode() + *data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + rec.SFlowIpv4Record, _ = decodeSFlowIpv4Record(data) + + return rec, nil +} + +// ************************************************** +// Extended IPv6 Tunnel Egress +// ************************************************** + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | 20 bit Interprise (0) |12 bit format | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | record length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// / Packet IP version 6 Record / +// / / +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +type SFlowExtendedIpv6TunnelEgressRecord struct { + SFlowBaseFlowRecord + SFlowIpv6Record +} + +func decodeExtendedIpv6TunnelEgress(data *[]byte) (SFlowExtendedIpv6TunnelEgressRecord, error) { + rec := SFlowExtendedIpv6TunnelEgressRecord{} + var fdf SFlowFlowDataFormat + + *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4])) + rec.EnterpriseID, rec.Format = fdf.decode() + *data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + rec.SFlowIpv6Record, _ = decodeSFlowIpv6Record(data) + + return rec, nil +} + +// ************************************************** +// Extended IPv6 Tunnel Ingress +// ************************************************** + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | 20 bit Interprise (0) |12 bit format | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | record length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// / Packet IP version 6 Record / +// / / +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +type SFlowExtendedIpv6TunnelIngressRecord struct { + SFlowBaseFlowRecord + SFlowIpv6Record +} + +func decodeExtendedIpv6TunnelIngress(data *[]byte) (SFlowExtendedIpv6TunnelIngressRecord, error) { + rec := SFlowExtendedIpv6TunnelIngressRecord{} + var fdf SFlowFlowDataFormat + + *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4])) + rec.EnterpriseID, rec.Format = fdf.decode() + *data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + rec.SFlowIpv6Record, _ = decodeSFlowIpv6Record(data) + + return rec, nil +} + +// ************************************************** +// Extended Decapsulate Egress +// ************************************************** + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | 20 bit Interprise (0) |12 bit format | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | record length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Inner Header Offset | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +type SFlowExtendedDecapsulateEgressRecord struct { + SFlowBaseFlowRecord + InnerHeaderOffset uint32 +} + +func decodeExtendedDecapsulateEgress(data *[]byte) (SFlowExtendedDecapsulateEgressRecord, error) { + rec := SFlowExtendedDecapsulateEgressRecord{} + var fdf SFlowFlowDataFormat + + *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4])) + rec.EnterpriseID, rec.Format = fdf.decode() + *data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, rec.InnerHeaderOffset = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + + return rec, nil +} + +// ************************************************** +// Extended Decapsulate Ingress +// 
************************************************** + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | 20 bit Interprise (0) |12 bit format | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | record length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Inner Header Offset | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +type SFlowExtendedDecapsulateIngressRecord struct { + SFlowBaseFlowRecord + InnerHeaderOffset uint32 +} + +func decodeExtendedDecapsulateIngress(data *[]byte) (SFlowExtendedDecapsulateIngressRecord, error) { + rec := SFlowExtendedDecapsulateIngressRecord{} + var fdf SFlowFlowDataFormat + + *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4])) + rec.EnterpriseID, rec.Format = fdf.decode() + *data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, rec.InnerHeaderOffset = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + + return rec, nil +} + +// ************************************************** +// Extended VNI Egress +// ************************************************** + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | 20 bit Interprise (0) |12 bit format | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | record length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | VNI | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +type SFlowExtendedVniEgressRecord struct { + SFlowBaseFlowRecord + VNI uint32 +} + +func decodeExtendedVniEgress(data *[]byte) (SFlowExtendedVniEgressRecord, error) { + rec := SFlowExtendedVniEgressRecord{} + var fdf SFlowFlowDataFormat + + *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4])) + rec.EnterpriseID, rec.Format = fdf.decode() + *data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, rec.VNI = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + + return rec, nil +} + +// ************************************************** +// Extended VNI Ingress +// ************************************************** + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | 20 bit Interprise (0) |12 bit format | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | record length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | VNI | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +type SFlowExtendedVniIngressRecord struct { + SFlowBaseFlowRecord + VNI uint32 +} + +func decodeExtendedVniIngress(data *[]byte) (SFlowExtendedVniIngressRecord, error) { + rec := SFlowExtendedVniIngressRecord{} + var fdf SFlowFlowDataFormat + + *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4])) + rec.EnterpriseID, rec.Format = fdf.decode() + *data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, rec.VNI = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + + return rec, nil +} + +// ************************************************** +// Counter Record +// ************************************************** + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | 20 bit Interprise (0) |12 bit format | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | counter length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// / counter data / +// / / +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + +type SFlowBaseCounterRecord struct { + EnterpriseID SFlowEnterpriseID + Format SFlowCounterRecordType + FlowDataLength uint32 
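// Despite the shared FlowDataLength name, for counter records this field
// holds the length in bytes of the counter data that follows the
// enterprise/format word.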
+} + +func (bcr SFlowBaseCounterRecord) GetType() SFlowCounterRecordType { + switch bcr.Format { + case SFlowTypeGenericInterfaceCounters: + return SFlowTypeGenericInterfaceCounters + case SFlowTypeEthernetInterfaceCounters: + return SFlowTypeEthernetInterfaceCounters + case SFlowTypeTokenRingInterfaceCounters: + return SFlowTypeTokenRingInterfaceCounters + case SFlowType100BaseVGInterfaceCounters: + return SFlowType100BaseVGInterfaceCounters + case SFlowTypeVLANCounters: + return SFlowTypeVLANCounters + case SFlowTypeLACPCounters: + return SFlowTypeLACPCounters + case SFlowTypeProcessorCounters: + return SFlowTypeProcessorCounters + case SFlowTypeOpenflowPortCounters: + return SFlowTypeOpenflowPortCounters + case SFlowTypePORTNAMECounters: + return SFlowTypePORTNAMECounters + case SFLowTypeAPPRESOURCESCounters: + return SFLowTypeAPPRESOURCESCounters + case SFlowTypeOVSDPCounters: + return SFlowTypeOVSDPCounters + } + unrecognized := fmt.Sprint("Unrecognized counter record type:", bcr.Format) + panic(unrecognized) +} + +// ************************************************** +// Counter Record +// ************************************************** + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | 20 bit Interprise (0) |12 bit format | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | counter length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | IfIndex | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | IfType | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | IfSpeed | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | IfDirection | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | IfStatus | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | IFInOctets | +// | | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | IfInUcastPkts | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | IfInMulticastPkts | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | IfInBroadcastPkts | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | IfInDiscards | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | InInErrors | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | IfInUnknownProtos | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | IfOutOctets | +// | | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | IfOutUcastPkts | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | IfOutMulticastPkts | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | IfOutBroadcastPkts | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | IfOutDiscards | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | IfOUtErrors | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | IfPromiscouousMode | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + +type SFlowGenericInterfaceCounters struct { + SFlowBaseCounterRecord + IfIndex uint32 + IfType uint32 + IfSpeed uint64 + IfDirection uint32 + IfStatus uint32 + IfInOctets uint64 + IfInUcastPkts uint32 + IfInMulticastPkts uint32 + IfInBroadcastPkts uint32 + IfInDiscards uint32 + IfInErrors uint32 + IfInUnknownProtos uint32 + IfOutOctets uint64 + IfOutUcastPkts uint32 + IfOutMulticastPkts uint32 + IfOutBroadcastPkts uint32 + IfOutDiscards uint32 + IfOutErrors uint32 + IfPromiscuousMode uint32 +} + +func decodeGenericInterfaceCounters(data *[]byte) (SFlowGenericInterfaceCounters, error) { + gic := SFlowGenericInterfaceCounters{} + var cdf SFlowCounterDataFormat + + *data, cdf = 
(*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4])) + gic.EnterpriseID, gic.Format = cdf.decode() + *data, gic.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, gic.IfIndex = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, gic.IfType = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, gic.IfSpeed = (*data)[8:], binary.BigEndian.Uint64((*data)[:8]) + *data, gic.IfDirection = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, gic.IfStatus = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, gic.IfInOctets = (*data)[8:], binary.BigEndian.Uint64((*data)[:8]) + *data, gic.IfInUcastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, gic.IfInMulticastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, gic.IfInBroadcastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, gic.IfInDiscards = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, gic.IfInErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, gic.IfInUnknownProtos = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, gic.IfOutOctets = (*data)[8:], binary.BigEndian.Uint64((*data)[:8]) + *data, gic.IfOutUcastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, gic.IfOutMulticastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, gic.IfOutBroadcastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, gic.IfOutDiscards = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, gic.IfOutErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, gic.IfPromiscuousMode = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + return gic, nil +} + +// ************************************************** +// Counter Record +// ************************************************** + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | 20 bit Interprise (0) |12 bit format | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | counter length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// / counter data / +// / / +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + +type SFlowEthernetCounters struct { + SFlowBaseCounterRecord + AlignmentErrors uint32 + FCSErrors uint32 + SingleCollisionFrames uint32 + MultipleCollisionFrames uint32 + SQETestErrors uint32 + DeferredTransmissions uint32 + LateCollisions uint32 + ExcessiveCollisions uint32 + InternalMacTransmitErrors uint32 + CarrierSenseErrors uint32 + FrameTooLongs uint32 + InternalMacReceiveErrors uint32 + SymbolErrors uint32 +} + +func decodeEthernetCounters(data *[]byte) (SFlowEthernetCounters, error) { + ec := SFlowEthernetCounters{} + var cdf SFlowCounterDataFormat + + *data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4])) + ec.EnterpriseID, ec.Format = cdf.decode() + if len(*data) < 4 { + return SFlowEthernetCounters{}, errors.New("ethernet counters too small") + } + *data, ec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + if len(*data) < 4 { + return SFlowEthernetCounters{}, errors.New("ethernet counters too small") + } + *data, ec.AlignmentErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + if len(*data) < 4 { + return SFlowEthernetCounters{}, errors.New("ethernet counters too small") + } + *data, ec.FCSErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + if len(*data) < 4 { + return SFlowEthernetCounters{}, errors.New("ethernet counters too small") + } + *data, ec.SingleCollisionFrames = 
(*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + if len(*data) < 4 { + return SFlowEthernetCounters{}, errors.New("ethernet counters too small") + } + *data, ec.MultipleCollisionFrames = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + if len(*data) < 4 { + return SFlowEthernetCounters{}, errors.New("ethernet counters too small") + } + *data, ec.SQETestErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + if len(*data) < 4 { + return SFlowEthernetCounters{}, errors.New("ethernet counters too small") + } + *data, ec.DeferredTransmissions = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + if len(*data) < 4 { + return SFlowEthernetCounters{}, errors.New("ethernet counters too small") + } + *data, ec.LateCollisions = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + if len(*data) < 4 { + return SFlowEthernetCounters{}, errors.New("ethernet counters too small") + } + *data, ec.ExcessiveCollisions = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + if len(*data) < 4 { + return SFlowEthernetCounters{}, errors.New("ethernet counters too small") + } + *data, ec.InternalMacTransmitErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + if len(*data) < 4 { + return SFlowEthernetCounters{}, errors.New("ethernet counters too small") + } + *data, ec.CarrierSenseErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + if len(*data) < 4 { + return SFlowEthernetCounters{}, errors.New("ethernet counters too small") + } + *data, ec.FrameTooLongs = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + if len(*data) < 4 { + return SFlowEthernetCounters{}, errors.New("ethernet counters too small") + } + *data, ec.InternalMacReceiveErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + if len(*data) < 4 { + return SFlowEthernetCounters{}, errors.New("ethernet counters too small") + } + *data, ec.SymbolErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + return ec, nil +} + +// VLAN Counter + +type SFlowVLANCounters struct { + SFlowBaseCounterRecord + VlanID uint32 + Octets uint64 + UcastPkts uint32 + MulticastPkts uint32 + BroadcastPkts uint32 + Discards uint32 +} + +func decodeVLANCounters(data *[]byte) (SFlowVLANCounters, error) { + vc := SFlowVLANCounters{} + var cdf SFlowCounterDataFormat + + *data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4])) + vc.EnterpriseID, vc.Format = cdf.decode() + vc.EnterpriseID, vc.Format = cdf.decode() + *data, vc.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, vc.VlanID = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, vc.Octets = (*data)[8:], binary.BigEndian.Uint64((*data)[:8]) + *data, vc.UcastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, vc.MulticastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, vc.BroadcastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, vc.Discards = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + return vc, nil +} + +//SFLLACPportState : SFlow LACP Port State (All(4) - 32 bit) +type SFLLACPPortState struct { + PortStateAll uint32 +} + +//LACPcounters : LACP SFlow Counters ( 64 Bytes ) +type SFlowLACPCounters struct { + SFlowBaseCounterRecord + ActorSystemID net.HardwareAddr + PartnerSystemID net.HardwareAddr + AttachedAggID uint32 + LacpPortState SFLLACPPortState + LACPDUsRx uint32 + MarkerPDUsRx uint32 + MarkerResponsePDUsRx uint32 + UnknownRx uint32 + IllegalRx uint32 + LACPDUsTx uint32 + MarkerPDUsTx uint32 + MarkerResponsePDUsTx uint32 +} + +func decodeLACPCounters(data *[]byte) 
(SFlowLACPCounters, error) { + la := SFlowLACPCounters{} + var cdf SFlowCounterDataFormat + + *data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4])) + la.EnterpriseID, la.Format = cdf.decode() + *data, la.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, la.ActorSystemID = (*data)[6:], (*data)[:6] + *data = (*data)[2:] // remove padding + *data, la.PartnerSystemID = (*data)[6:], (*data)[:6] + *data = (*data)[2:] //remove padding + *data, la.AttachedAggID = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, la.LacpPortState.PortStateAll = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, la.LACPDUsRx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, la.MarkerPDUsRx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, la.MarkerResponsePDUsRx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, la.UnknownRx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, la.IllegalRx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, la.LACPDUsTx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, la.MarkerPDUsTx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, la.MarkerResponsePDUsTx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + + return la, nil + +} + +// ************************************************** +// Processor Counter Record +// ************************************************** +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | 20 bit Interprise (0) |12 bit format | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | counter length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | FiveSecCpu | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | OneMinCpu | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | GiveMinCpu | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | TotalMemory | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | FreeMemory | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + +type SFlowProcessorCounters struct { + SFlowBaseCounterRecord + FiveSecCpu uint32 // 5 second average CPU utilization + OneMinCpu uint32 // 1 minute average CPU utilization + FiveMinCpu uint32 // 5 minute average CPU utilization + TotalMemory uint64 // total memory (in bytes) + FreeMemory uint64 // free memory (in bytes) +} + +func decodeProcessorCounters(data *[]byte) (SFlowProcessorCounters, error) { + pc := SFlowProcessorCounters{} + var cdf SFlowCounterDataFormat + var high32, low32 uint32 + + *data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4])) + pc.EnterpriseID, pc.Format = cdf.decode() + *data, pc.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + + *data, pc.FiveSecCpu = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, pc.OneMinCpu = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, pc.FiveMinCpu = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, high32 = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, low32 = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + pc.TotalMemory = (uint64(high32) << 32) + uint64(low32) + *data, high32 = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, low32 = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + pc.FreeMemory = (uint64(high32)) + uint64(low32) + + return pc, nil +} + +// SFlowEthernetFrameFlowRecord give additional information +// about the sampled packet if it's available. 
+// An agent may or may not provide this information. +type SFlowEthernetFrameFlowRecord struct { + SFlowBaseFlowRecord + FrameLength uint32 + SrcMac net.HardwareAddr + DstMac net.HardwareAddr + Type uint32 +} + +// Ethernet frame flow records have the following structure: + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | 20 bit Interprise (0) |12 bit format | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | record length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Source Mac Address | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Destination Mac Address | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Ethernet Packet Type | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + +func decodeEthernetFrameFlowRecord(data *[]byte) (SFlowEthernetFrameFlowRecord, error) { + es := SFlowEthernetFrameFlowRecord{} + var fdf SFlowFlowDataFormat + + *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4])) + es.EnterpriseID, es.Format = fdf.decode() + *data, es.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + + *data, es.FrameLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, es.SrcMac = (*data)[8:], net.HardwareAddr((*data)[:6]) + *data, es.DstMac = (*data)[8:], net.HardwareAddr((*data)[:6]) + *data, es.Type = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + return es, nil +} + +//SFlowOpenflowPortCounters : OVS-Sflow OpenFlow Port Counter ( 20 Bytes ) +type SFlowOpenflowPortCounters struct { + SFlowBaseCounterRecord + DatapathID uint64 + PortNo uint32 +} + +func decodeOpenflowportCounters(data *[]byte) (SFlowOpenflowPortCounters, error) { + ofp := SFlowOpenflowPortCounters{} + var cdf SFlowCounterDataFormat + + *data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4])) + ofp.EnterpriseID, ofp.Format = cdf.decode() + *data, ofp.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, ofp.DatapathID = (*data)[8:], binary.BigEndian.Uint64((*data)[:8]) + *data, ofp.PortNo = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + + return ofp, nil +} + +//SFlowAppresourcesCounters : OVS_Sflow App Resources Counter ( 48 Bytes ) +type SFlowAppresourcesCounters struct { + SFlowBaseCounterRecord + UserTime uint32 + SystemTime uint32 + MemUsed uint64 + MemMax uint64 + FdOpen uint32 + FdMax uint32 + ConnOpen uint32 + ConnMax uint32 +} + +func decodeAppresourcesCounters(data *[]byte) (SFlowAppresourcesCounters, error) { + app := SFlowAppresourcesCounters{} + var cdf SFlowCounterDataFormat + + *data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4])) + app.EnterpriseID, app.Format = cdf.decode() + *data, app.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, app.UserTime = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, app.SystemTime = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, app.MemUsed = (*data)[8:], binary.BigEndian.Uint64((*data)[:8]) + *data, app.MemMax = (*data)[8:], binary.BigEndian.Uint64((*data)[:8]) + *data, app.FdOpen = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, app.FdMax = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, app.ConnOpen = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, app.ConnMax = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + + return app, nil +} + +//SFlowOVSDPCounters : OVS-Sflow DataPath Counter ( 32 Bytes ) +type SFlowOVSDPCounters struct { + SFlowBaseCounterRecord + 
NHit uint32 + NMissed uint32 + NLost uint32 + NMaskHit uint32 + NFlows uint32 + NMasks uint32 +} + +func decodeOVSDPCounters(data *[]byte) (SFlowOVSDPCounters, error) { + dp := SFlowOVSDPCounters{} + var cdf SFlowCounterDataFormat + + *data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4])) + dp.EnterpriseID, dp.Format = cdf.decode() + *data, dp.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, dp.NHit = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, dp.NMissed = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, dp.NLost = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, dp.NMaskHit = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, dp.NFlows = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, dp.NMasks = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + + return dp, nil +} + +//SFlowPORTNAME : OVS-Sflow PORTNAME Counter Sampletype ( 20 Bytes ) +type SFlowPORTNAME struct { + SFlowBaseCounterRecord + Len uint32 + Str string +} + +func decodeString(data *[]byte) (len uint32, str string) { + *data, len = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + str = string((*data)[:len]) + if (len % 4) != 0 { + len += 4 - len%4 + } + *data = (*data)[len:] + return +} + +func decodePortnameCounters(data *[]byte) (SFlowPORTNAME, error) { + pn := SFlowPORTNAME{} + var cdf SFlowCounterDataFormat + + *data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4])) + pn.EnterpriseID, pn.Format = cdf.decode() + *data, pn.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + pn.Len, pn.Str = decodeString(data) + + return pn, nil +} diff --git a/vendor/github.com/google/gopacket/layers/sip.go b/vendor/github.com/google/gopacket/layers/sip.go new file mode 100644 index 0000000000..70afdb5c06 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/sip.go @@ -0,0 +1,542 @@ +// Copyright 2017 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. 
+ +package layers + +import ( + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/google/gopacket" +) + +// SIPVersion defines the different versions of the SIP Protocol +type SIPVersion uint8 + +// Represents all the versions of SIP protocol +const ( + SIPVersion1 SIPVersion = 1 + SIPVersion2 SIPVersion = 2 +) + +func (sv SIPVersion) String() string { + switch sv { + default: + // Defaulting to SIP/2.0 + return "SIP/2.0" + case SIPVersion1: + return "SIP/1.0" + case SIPVersion2: + return "SIP/2.0" + } +} + +// GetSIPVersion is used to get SIP version constant +func GetSIPVersion(version string) (SIPVersion, error) { + switch strings.ToUpper(version) { + case "SIP/1.0": + return SIPVersion1, nil + case "SIP/2.0": + return SIPVersion2, nil + default: + return 0, fmt.Errorf("Unknown SIP version: '%s'", version) + + } +} + +// SIPMethod defines the different methods of the SIP Protocol +// defined in the different RFC's +type SIPMethod uint16 + +// Here are all the SIP methods +const ( + SIPMethodInvite SIPMethod = 1 // INVITE [RFC3261] + SIPMethodAck SIPMethod = 2 // ACK [RFC3261] + SIPMethodBye SIPMethod = 3 // BYE [RFC3261] + SIPMethodCancel SIPMethod = 4 // CANCEL [RFC3261] + SIPMethodOptions SIPMethod = 5 // OPTIONS [RFC3261] + SIPMethodRegister SIPMethod = 6 // REGISTER [RFC3261] + SIPMethodPrack SIPMethod = 7 // PRACK [RFC3262] + SIPMethodSubscribe SIPMethod = 8 // SUBSCRIBE [RFC6665] + SIPMethodNotify SIPMethod = 9 // NOTIFY [RFC6665] + SIPMethodPublish SIPMethod = 10 // PUBLISH [RFC3903] + SIPMethodInfo SIPMethod = 11 // INFO [RFC6086] + SIPMethodRefer SIPMethod = 12 // REFER [RFC3515] + SIPMethodMessage SIPMethod = 13 // MESSAGE [RFC3428] + SIPMethodUpdate SIPMethod = 14 // UPDATE [RFC3311] + SIPMethodPing SIPMethod = 15 // PING [https://tools.ietf.org/html/draft-fwmiller-ping-03] +) + +func (sm SIPMethod) String() string { + switch sm { + default: + return "Unknown method" + case SIPMethodInvite: + return "INVITE" + case SIPMethodAck: + return "ACK" + case SIPMethodBye: + return "BYE" + case SIPMethodCancel: + return "CANCEL" + case SIPMethodOptions: + return "OPTIONS" + case SIPMethodRegister: + return "REGISTER" + case SIPMethodPrack: + return "PRACK" + case SIPMethodSubscribe: + return "SUBSCRIBE" + case SIPMethodNotify: + return "NOTIFY" + case SIPMethodPublish: + return "PUBLISH" + case SIPMethodInfo: + return "INFO" + case SIPMethodRefer: + return "REFER" + case SIPMethodMessage: + return "MESSAGE" + case SIPMethodUpdate: + return "UPDATE" + case SIPMethodPing: + return "PING" + } +} + +// GetSIPMethod returns the constant of a SIP method +// from its string +func GetSIPMethod(method string) (SIPMethod, error) { + switch strings.ToUpper(method) { + case "INVITE": + return SIPMethodInvite, nil + case "ACK": + return SIPMethodAck, nil + case "BYE": + return SIPMethodBye, nil + case "CANCEL": + return SIPMethodCancel, nil + case "OPTIONS": + return SIPMethodOptions, nil + case "REGISTER": + return SIPMethodRegister, nil + case "PRACK": + return SIPMethodPrack, nil + case "SUBSCRIBE": + return SIPMethodSubscribe, nil + case "NOTIFY": + return SIPMethodNotify, nil + case "PUBLISH": + return SIPMethodPublish, nil + case "INFO": + return SIPMethodInfo, nil + case "REFER": + return SIPMethodRefer, nil + case "MESSAGE": + return SIPMethodMessage, nil + case "UPDATE": + return SIPMethodUpdate, nil + case "PING": + return SIPMethodPing, nil + default: + return 0, fmt.Errorf("Unknown SIP method: '%s'", method) + } +} + +// Here is a correspondance between long header names 
and short +// as defined in rfc3261 in section 20 +var compactSipHeadersCorrespondance = map[string]string{ + "accept-contact": "a", + "allow-events": "u", + "call-id": "i", + "contact": "m", + "content-encoding": "e", + "content-length": "l", + "content-type": "c", + "event": "o", + "from": "f", + "identity": "y", + "refer-to": "r", + "referred-by": "b", + "reject-contact": "j", + "request-disposition": "d", + "session-expires": "x", + "subject": "s", + "supported": "k", + "to": "t", + "via": "v", +} + +// SIP object will contains information about decoded SIP packet. +// -> The SIP Version +// -> The SIP Headers (in a map[string][]string because of multiple headers with the same name +// -> The SIP Method +// -> The SIP Response code (if it's a response) +// -> The SIP Status line (if it's a response) +// You can easily know the type of the packet with the IsResponse boolean +// +type SIP struct { + BaseLayer + + // Base information + Version SIPVersion + Method SIPMethod + Headers map[string][]string + + // Request + RequestURI string + + // Response + IsResponse bool + ResponseCode int + ResponseStatus string + + // Private fields + cseq int64 + contentLength int64 + lastHeaderParsed string +} + +// decodeSIP decodes the byte slice into a SIP type. It also +// setups the application Layer in PacketBuilder. +func decodeSIP(data []byte, p gopacket.PacketBuilder) error { + s := NewSIP() + err := s.DecodeFromBytes(data, p) + if err != nil { + return err + } + p.AddLayer(s) + p.SetApplicationLayer(s) + return nil +} + +// NewSIP instantiates a new empty SIP object +func NewSIP() *SIP { + s := new(SIP) + s.Headers = make(map[string][]string) + return s +} + +// LayerType returns gopacket.LayerTypeSIP. +func (s *SIP) LayerType() gopacket.LayerType { + return LayerTypeSIP +} + +// Payload returns the base layer payload +func (s *SIP) Payload() []byte { + return s.BaseLayer.Payload +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode +func (s *SIP) CanDecode() gopacket.LayerClass { + return LayerTypeSIP +} + +// NextLayerType returns the layer type contained by this DecodingLayer +func (s *SIP) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypePayload +} + +// DecodeFromBytes decodes the slice into the SIP struct. +func (s *SIP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + // Init some vars for parsing follow-up + var countLines int + var line []byte + var err error + var offset int + + // Iterate on all lines of the SIP Headers + // and stop when we reach the SDP (aka when the new line + // is at index 0 of the remaining packet) + buffer := bytes.NewBuffer(data) + + for { + + // Read next line + line, err = buffer.ReadBytes(byte('\n')) + if err != nil { + if err == io.EOF { + if len(bytes.Trim(line, "\r\n")) > 0 { + df.SetTruncated() + } + break + } else { + return err + } + } + offset += len(line) + + // Trim the new line delimiters + line = bytes.Trim(line, "\r\n") + + // Empty line, we hit Body + if len(line) == 0 { + break + } + + // First line is the SIP request/response line + // Other lines are headers + if countLines == 0 { + err = s.ParseFirstLine(line) + if err != nil { + return err + } + + } else { + err = s.ParseHeader(line) + if err != nil { + return err + } + } + + countLines++ + } + s.BaseLayer = BaseLayer{Contents: data[:offset], Payload: data[offset:]} + + return nil +} + +// ParseFirstLine will compute the first line of a SIP packet. +// The first line will tell us if it's a request or a response. 
+// +// Examples of first line of SIP Prococol : +// +// Request : INVITE bob@example.com SIP/2.0 +// Response : SIP/2.0 200 OK +// Response : SIP/2.0 501 Not Implemented +// +func (s *SIP) ParseFirstLine(firstLine []byte) error { + + var err error + + // Splits line by space + splits := strings.SplitN(string(firstLine), " ", 3) + + // We must have at least 3 parts + if len(splits) < 3 { + return fmt.Errorf("invalid first SIP line: '%s'", string(firstLine)) + } + + // Determine the SIP packet type + if strings.HasPrefix(splits[0], "SIP") { + + // --> Response + s.IsResponse = true + + // Validate SIP Version + s.Version, err = GetSIPVersion(splits[0]) + if err != nil { + return err + } + + // Compute code + s.ResponseCode, err = strconv.Atoi(splits[1]) + if err != nil { + return err + } + + // Compute status line + s.ResponseStatus = splits[2] + + } else { + + // --> Request + + // Validate method + s.Method, err = GetSIPMethod(splits[0]) + if err != nil { + return err + } + + s.RequestURI = splits[1] + + // Validate SIP Version + s.Version, err = GetSIPVersion(splits[2]) + if err != nil { + return err + } + } + + return nil +} + +// ParseHeader will parse a SIP Header +// SIP Headers are quite simple, there are colon separated name and value +// Headers can be spread over multiple lines +// +// Examples of header : +// +// CSeq: 1 REGISTER +// Via: SIP/2.0/UDP there.com:5060 +// Authorization:Digest username="UserB", +// realm="MCI WorldCom SIP", +// nonce="1cec4341ae6cbe5a359ea9c8e88df84f", opaque="", +// uri="sip:ss2.wcom.com", response="71ba27c64bd01de719686aa4590d5824" +// +func (s *SIP) ParseHeader(header []byte) (err error) { + + // Ignore empty headers + if len(header) == 0 { + return + } + + // Check if this is the following of last header + // RFC 3261 - 7.3.1 - Header Field Format specify that following lines of + // multiline headers must begin by SP or TAB + if header[0] == '\t' || header[0] == ' ' { + + header = bytes.TrimSpace(header) + s.Headers[s.lastHeaderParsed][len(s.Headers[s.lastHeaderParsed])-1] += fmt.Sprintf(" %s", string(header)) + return + } + + // Find the ':' to separate header name and value + index := bytes.Index(header, []byte(":")) + if index >= 0 { + + headerName := strings.ToLower(string(bytes.Trim(header[:index], " "))) + headerValue := string(bytes.Trim(header[index+1:], " ")) + + // Add header to object + s.Headers[headerName] = append(s.Headers[headerName], headerValue) + s.lastHeaderParsed = headerName + + // Compute specific headers + err = s.ParseSpecificHeaders(headerName, headerValue) + if err != nil { + return err + } + } + + return nil +} + +// ParseSpecificHeaders will parse some specific key values from +// specific headers like CSeq or Content-Length integer values +func (s *SIP) ParseSpecificHeaders(headerName string, headerValue string) (err error) { + + switch headerName { + case "cseq": + + // CSeq header value is formatted like that : + // CSeq: 123 INVITE + // We split the value to parse Cseq integer value, and method + splits := strings.Split(headerValue, " ") + if len(splits) > 1 { + + // Parse Cseq + s.cseq, err = strconv.ParseInt(splits[0], 10, 64) + if err != nil { + return err + } + + // Validate method + if s.IsResponse { + s.Method, err = GetSIPMethod(splits[1]) + if err != nil { + return err + } + } + } + + case "content-length": + + // Parse Content-Length + s.contentLength, err = strconv.ParseInt(headerValue, 10, 64) + if err != nil { + return err + } + } + + return nil +} + +// GetAllHeaders will return the full headers of 
the +// current SIP packets in a map[string][]string +func (s *SIP) GetAllHeaders() map[string][]string { + return s.Headers +} + +// GetHeader will return all the headers with +// the specified name. +func (s *SIP) GetHeader(headerName string) []string { + headerName = strings.ToLower(headerName) + h := make([]string, 0) + if _, ok := s.Headers[headerName]; ok { + return s.Headers[headerName] + } + compactHeader := compactSipHeadersCorrespondance[headerName] + if _, ok := s.Headers[compactHeader]; ok { + return s.Headers[compactHeader] + } + return h +} + +// GetFirstHeader will return the first header with +// the specified name. If the current SIP packet has multiple +// headers with the same name, it returns the first. +func (s *SIP) GetFirstHeader(headerName string) string { + headers := s.GetHeader(headerName) + if len(headers) > 0 { + return headers[0] + } + return "" +} + +// +// Some handy getters for most used SIP headers +// + +// GetAuthorization will return the Authorization +// header of the current SIP packet +func (s *SIP) GetAuthorization() string { + return s.GetFirstHeader("Authorization") +} + +// GetFrom will return the From +// header of the current SIP packet +func (s *SIP) GetFrom() string { + return s.GetFirstHeader("From") +} + +// GetTo will return the To +// header of the current SIP packet +func (s *SIP) GetTo() string { + return s.GetFirstHeader("To") +} + +// GetContact will return the Contact +// header of the current SIP packet +func (s *SIP) GetContact() string { + return s.GetFirstHeader("Contact") +} + +// GetCallID will return the Call-ID +// header of the current SIP packet +func (s *SIP) GetCallID() string { + return s.GetFirstHeader("Call-ID") +} + +// GetUserAgent will return the User-Agent +// header of the current SIP packet +func (s *SIP) GetUserAgent() string { + return s.GetFirstHeader("User-Agent") +} + +// GetContentLength will return the parsed integer +// Content-Length header of the current SIP packet +func (s *SIP) GetContentLength() int64 { + return s.contentLength +} + +// GetCSeq will return the parsed integer CSeq header +// header of the current SIP packet +func (s *SIP) GetCSeq() int64 { + return s.cseq +} diff --git a/vendor/github.com/google/gopacket/layers/stp.go b/vendor/github.com/google/gopacket/layers/stp.go new file mode 100644 index 0000000000..bde7d7c8ef --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/stp.go @@ -0,0 +1,27 @@ +// Copyright 2017 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "github.com/google/gopacket" +) + +// STP decode spanning tree protocol packets to transport BPDU (bridge protocol data unit) message. +type STP struct { + BaseLayer +} + +// LayerType returns gopacket.LayerTypeSTP. +func (s *STP) LayerType() gopacket.LayerType { return LayerTypeSTP } + +func decodeSTP(data []byte, p gopacket.PacketBuilder) error { + stp := &STP{} + stp.Contents = data[:] + // TODO: parse the STP protocol into actual subfields. + p.AddLayer(stp) + return nil +} diff --git a/vendor/github.com/google/gopacket/layers/tcp.go b/vendor/github.com/google/gopacket/layers/tcp.go new file mode 100644 index 0000000000..bcdeb4b3c3 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/tcp.go @@ -0,0 +1,341 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// Copyright 2009-2011 Andreas Krennmair. All rights reserved. 
+// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + + "github.com/google/gopacket" +) + +// TCP is the layer for TCP headers. +type TCP struct { + BaseLayer + SrcPort, DstPort TCPPort + Seq uint32 + Ack uint32 + DataOffset uint8 + FIN, SYN, RST, PSH, ACK, URG, ECE, CWR, NS bool + Window uint16 + Checksum uint16 + Urgent uint16 + sPort, dPort []byte + Options []TCPOption + Padding []byte + opts [4]TCPOption + tcpipchecksum +} + +// TCPOptionKind represents a TCP option code. +type TCPOptionKind uint8 + +const ( + TCPOptionKindEndList = 0 + TCPOptionKindNop = 1 + TCPOptionKindMSS = 2 // len = 4 + TCPOptionKindWindowScale = 3 // len = 3 + TCPOptionKindSACKPermitted = 4 // len = 2 + TCPOptionKindSACK = 5 // len = n + TCPOptionKindEcho = 6 // len = 6, obsolete + TCPOptionKindEchoReply = 7 // len = 6, obsolete + TCPOptionKindTimestamps = 8 // len = 10 + TCPOptionKindPartialOrderConnectionPermitted = 9 // len = 2, obsolete + TCPOptionKindPartialOrderServiceProfile = 10 // len = 3, obsolete + TCPOptionKindCC = 11 // obsolete + TCPOptionKindCCNew = 12 // obsolete + TCPOptionKindCCEcho = 13 // obsolete + TCPOptionKindAltChecksum = 14 // len = 3, obsolete + TCPOptionKindAltChecksumData = 15 // len = n, obsolete +) + +func (k TCPOptionKind) String() string { + switch k { + case TCPOptionKindEndList: + return "EndList" + case TCPOptionKindNop: + return "NOP" + case TCPOptionKindMSS: + return "MSS" + case TCPOptionKindWindowScale: + return "WindowScale" + case TCPOptionKindSACKPermitted: + return "SACKPermitted" + case TCPOptionKindSACK: + return "SACK" + case TCPOptionKindEcho: + return "Echo" + case TCPOptionKindEchoReply: + return "EchoReply" + case TCPOptionKindTimestamps: + return "Timestamps" + case TCPOptionKindPartialOrderConnectionPermitted: + return "PartialOrderConnectionPermitted" + case TCPOptionKindPartialOrderServiceProfile: + return "PartialOrderServiceProfile" + case TCPOptionKindCC: + return "CC" + case TCPOptionKindCCNew: + return "CCNew" + case TCPOptionKindCCEcho: + return "CCEcho" + case TCPOptionKindAltChecksum: + return "AltChecksum" + case TCPOptionKindAltChecksumData: + return "AltChecksumData" + default: + return fmt.Sprintf("Unknown(%d)", k) + } +} + +type TCPOption struct { + OptionType TCPOptionKind + OptionLength uint8 + OptionData []byte +} + +func (t TCPOption) String() string { + hd := hex.EncodeToString(t.OptionData) + if len(hd) > 0 { + hd = " 0x" + hd + } + switch t.OptionType { + case TCPOptionKindMSS: + if len(t.OptionData) >= 2 { + return fmt.Sprintf("TCPOption(%s:%v%s)", + t.OptionType, + binary.BigEndian.Uint16(t.OptionData), + hd) + } + + case TCPOptionKindTimestamps: + if len(t.OptionData) == 8 { + return fmt.Sprintf("TCPOption(%s:%v/%v%s)", + t.OptionType, + binary.BigEndian.Uint32(t.OptionData[:4]), + binary.BigEndian.Uint32(t.OptionData[4:8]), + hd) + } + } + return fmt.Sprintf("TCPOption(%s:%s)", t.OptionType, hd) +} + +// LayerType returns gopacket.LayerTypeTCP +func (t *TCP) LayerType() gopacket.LayerType { return LayerTypeTCP } + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. 
+func (t *TCP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + var optionLength int + for _, o := range t.Options { + switch o.OptionType { + case 0, 1: + optionLength += 1 + default: + optionLength += 2 + len(o.OptionData) + } + } + if opts.FixLengths { + if rem := optionLength % 4; rem != 0 { + t.Padding = lotsOfZeros[:4-rem] + } + t.DataOffset = uint8((len(t.Padding) + optionLength + 20) / 4) + } + bytes, err := b.PrependBytes(20 + optionLength + len(t.Padding)) + if err != nil { + return err + } + binary.BigEndian.PutUint16(bytes, uint16(t.SrcPort)) + binary.BigEndian.PutUint16(bytes[2:], uint16(t.DstPort)) + binary.BigEndian.PutUint32(bytes[4:], t.Seq) + binary.BigEndian.PutUint32(bytes[8:], t.Ack) + binary.BigEndian.PutUint16(bytes[12:], t.flagsAndOffset()) + binary.BigEndian.PutUint16(bytes[14:], t.Window) + binary.BigEndian.PutUint16(bytes[18:], t.Urgent) + start := 20 + for _, o := range t.Options { + bytes[start] = byte(o.OptionType) + switch o.OptionType { + case 0, 1: + start++ + default: + if opts.FixLengths { + o.OptionLength = uint8(len(o.OptionData) + 2) + } + bytes[start+1] = o.OptionLength + copy(bytes[start+2:start+len(o.OptionData)+2], o.OptionData) + start += len(o.OptionData) + 2 + } + } + copy(bytes[start:], t.Padding) + if opts.ComputeChecksums { + // zero out checksum bytes in current serialization. + bytes[16] = 0 + bytes[17] = 0 + csum, err := t.computeChecksum(b.Bytes(), IPProtocolTCP) + if err != nil { + return err + } + t.Checksum = csum + } + binary.BigEndian.PutUint16(bytes[16:], t.Checksum) + return nil +} + +func (t *TCP) ComputeChecksum() (uint16, error) { + return t.computeChecksum(append(t.Contents, t.Payload...), IPProtocolTCP) +} + +func (t *TCP) flagsAndOffset() uint16 { + f := uint16(t.DataOffset) << 12 + if t.FIN { + f |= 0x0001 + } + if t.SYN { + f |= 0x0002 + } + if t.RST { + f |= 0x0004 + } + if t.PSH { + f |= 0x0008 + } + if t.ACK { + f |= 0x0010 + } + if t.URG { + f |= 0x0020 + } + if t.ECE { + f |= 0x0040 + } + if t.CWR { + f |= 0x0080 + } + if t.NS { + f |= 0x0100 + } + return f +} + +func (tcp *TCP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 20 { + df.SetTruncated() + return fmt.Errorf("Invalid TCP header. Length %d less than 20", len(data)) + } + tcp.SrcPort = TCPPort(binary.BigEndian.Uint16(data[0:2])) + tcp.sPort = data[0:2] + tcp.DstPort = TCPPort(binary.BigEndian.Uint16(data[2:4])) + tcp.dPort = data[2:4] + tcp.Seq = binary.BigEndian.Uint32(data[4:8]) + tcp.Ack = binary.BigEndian.Uint32(data[8:12]) + tcp.DataOffset = data[12] >> 4 + tcp.FIN = data[13]&0x01 != 0 + tcp.SYN = data[13]&0x02 != 0 + tcp.RST = data[13]&0x04 != 0 + tcp.PSH = data[13]&0x08 != 0 + tcp.ACK = data[13]&0x10 != 0 + tcp.URG = data[13]&0x20 != 0 + tcp.ECE = data[13]&0x40 != 0 + tcp.CWR = data[13]&0x80 != 0 + tcp.NS = data[12]&0x01 != 0 + tcp.Window = binary.BigEndian.Uint16(data[14:16]) + tcp.Checksum = binary.BigEndian.Uint16(data[16:18]) + tcp.Urgent = binary.BigEndian.Uint16(data[18:20]) + if tcp.Options == nil { + // Pre-allocate to avoid allocating a slice. 
+ tcp.Options = tcp.opts[:0] + } else { + tcp.Options = tcp.Options[:0] + } + tcp.Padding = tcp.Padding[:0] + if tcp.DataOffset < 5 { + return fmt.Errorf("Invalid TCP data offset %d < 5", tcp.DataOffset) + } + dataStart := int(tcp.DataOffset) * 4 + if dataStart > len(data) { + df.SetTruncated() + tcp.Payload = nil + tcp.Contents = data + return errors.New("TCP data offset greater than packet length") + } + tcp.Contents = data[:dataStart] + tcp.Payload = data[dataStart:] + // From here on, data points just to the header options. + data = data[20:dataStart] +OPTIONS: + for len(data) > 0 { + tcp.Options = append(tcp.Options, TCPOption{OptionType: TCPOptionKind(data[0])}) + opt := &tcp.Options[len(tcp.Options)-1] + switch opt.OptionType { + case TCPOptionKindEndList: // End of options + opt.OptionLength = 1 + tcp.Padding = data[1:] + break OPTIONS + case TCPOptionKindNop: // 1 byte padding + opt.OptionLength = 1 + default: + if len(data) < 2 { + df.SetTruncated() + return fmt.Errorf("Invalid TCP option length. Length %d less than 2", len(data)) + } + opt.OptionLength = data[1] + if opt.OptionLength < 2 { + return fmt.Errorf("Invalid TCP option length %d < 2", opt.OptionLength) + } else if int(opt.OptionLength) > len(data) { + df.SetTruncated() + return fmt.Errorf("Invalid TCP option length %d exceeds remaining %d bytes", opt.OptionLength, len(data)) + } + opt.OptionData = data[2:opt.OptionLength] + } + data = data[opt.OptionLength:] + } + return nil +} + +func (t *TCP) CanDecode() gopacket.LayerClass { + return LayerTypeTCP +} + +func (t *TCP) NextLayerType() gopacket.LayerType { + lt := t.DstPort.LayerType() + if lt == gopacket.LayerTypePayload { + lt = t.SrcPort.LayerType() + } + return lt +} + +func decodeTCP(data []byte, p gopacket.PacketBuilder) error { + tcp := &TCP{} + err := tcp.DecodeFromBytes(data, p) + p.AddLayer(tcp) + p.SetTransportLayer(tcp) + if err != nil { + return err + } + if p.DecodeOptions().DecodeStreamsAsDatagrams { + return p.NextDecoder(tcp.NextLayerType()) + } else { + return p.NextDecoder(gopacket.LayerTypePayload) + } +} + +func (t *TCP) TransportFlow() gopacket.Flow { + return gopacket.NewFlow(EndpointTCPPort, t.sPort, t.dPort) +} + +// For testing only +func (t *TCP) SetInternalPortsForTesting() { + t.sPort = make([]byte, 2) + t.dPort = make([]byte, 2) + binary.BigEndian.PutUint16(t.sPort, uint16(t.SrcPort)) + binary.BigEndian.PutUint16(t.dPort, uint16(t.DstPort)) +} diff --git a/vendor/github.com/google/gopacket/layers/tcpip.go b/vendor/github.com/google/gopacket/layers/tcpip.go new file mode 100644 index 0000000000..64ba51cc75 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/tcpip.go @@ -0,0 +1,104 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// Copyright 2009-2011 Andreas Krennmair. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "errors" + "fmt" + + "github.com/google/gopacket" +) + +// Checksum computation for TCP/UDP. 
+type tcpipchecksum struct { + pseudoheader tcpipPseudoHeader +} + +type tcpipPseudoHeader interface { + pseudoheaderChecksum() (uint32, error) +} + +func (ip *IPv4) pseudoheaderChecksum() (csum uint32, err error) { + if err := ip.AddressTo4(); err != nil { + return 0, err + } + csum += (uint32(ip.SrcIP[0]) + uint32(ip.SrcIP[2])) << 8 + csum += uint32(ip.SrcIP[1]) + uint32(ip.SrcIP[3]) + csum += (uint32(ip.DstIP[0]) + uint32(ip.DstIP[2])) << 8 + csum += uint32(ip.DstIP[1]) + uint32(ip.DstIP[3]) + return csum, nil +} + +func (ip *IPv6) pseudoheaderChecksum() (csum uint32, err error) { + if err := ip.AddressTo16(); err != nil { + return 0, err + } + for i := 0; i < 16; i += 2 { + csum += uint32(ip.SrcIP[i]) << 8 + csum += uint32(ip.SrcIP[i+1]) + csum += uint32(ip.DstIP[i]) << 8 + csum += uint32(ip.DstIP[i+1]) + } + return csum, nil +} + +// Calculate the TCP/IP checksum defined in rfc1071. The passed-in csum is any +// initial checksum data that's already been computed. +func tcpipChecksum(data []byte, csum uint32) uint16 { + // to handle odd lengths, we loop to length - 1, incrementing by 2, then + // handle the last byte specifically by checking against the original + // length. + length := len(data) - 1 + for i := 0; i < length; i += 2 { + // For our test packet, doing this manually is about 25% faster + // (740 ns vs. 1000ns) than doing it by calling binary.BigEndian.Uint16. + csum += uint32(data[i]) << 8 + csum += uint32(data[i+1]) + } + if len(data)%2 == 1 { + csum += uint32(data[length]) << 8 + } + for csum > 0xffff { + csum = (csum >> 16) + (csum & 0xffff) + } + return ^uint16(csum) +} + +// computeChecksum computes a TCP or UDP checksum. headerAndPayload is the +// serialized TCP or UDP header plus its payload, with the checksum zero'd +// out. headerProtocol is the IP protocol number of the upper-layer header. +func (c *tcpipchecksum) computeChecksum(headerAndPayload []byte, headerProtocol IPProtocol) (uint16, error) { + if c.pseudoheader == nil { + return 0, errors.New("TCP/IP layer 4 checksum cannot be computed without network layer... call SetNetworkLayerForChecksum to set which layer to use") + } + length := uint32(len(headerAndPayload)) + csum, err := c.pseudoheader.pseudoheaderChecksum() + if err != nil { + return 0, err + } + csum += uint32(headerProtocol) + csum += length & 0xffff + csum += length >> 16 + return tcpipChecksum(headerAndPayload, csum), nil +} + +// SetNetworkLayerForChecksum tells this layer which network layer is wrapping it. +// This is needed for computing the checksum when serializing, since TCP/IP transport +// layer checksums depends on fields in the IPv4 or IPv6 layer that contains it. +// The passed in layer must be an *IPv4 or *IPv6. +func (i *tcpipchecksum) SetNetworkLayerForChecksum(l gopacket.NetworkLayer) error { + switch v := l.(type) { + case *IPv4: + i.pseudoheader = v + case *IPv6: + i.pseudoheader = v + default: + return fmt.Errorf("cannot use layer type %v for tcp checksum network layer", l.LayerType()) + } + return nil +} diff --git a/vendor/github.com/google/gopacket/layers/test_creator.py b/vendor/github.com/google/gopacket/layers/test_creator.py new file mode 100644 index 0000000000..c92d2765a2 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/test_creator.py @@ -0,0 +1,103 @@ +#!/usr/bin/python +# Copyright 2012 Google, Inc. All rights reserved. 
+ +"""TestCreator creates test templates from pcap files.""" + +import argparse +import base64 +import glob +import re +import string +import subprocess +import sys + + +class Packet(object): + """Helper class encapsulating packet from a pcap file.""" + + def __init__(self, packet_lines): + self.packet_lines = packet_lines + self.data = self._DecodeText(packet_lines) + + @classmethod + def _DecodeText(cls, packet_lines): + packet_bytes = [] + # First line is timestamp and stuff, skip it. + # Format: 0x0010: 0000 0020 3aff 3ffe 0000 0000 0000 0000 ....:.?......... + + for line in packet_lines[1:]: + m = re.match(r'\s+0x[a-f\d]+:\s+((?:[\da-f]{2,4}\s)*)', line, re.IGNORECASE) + if m is None: continue + for hexpart in m.group(1).split(): + packet_bytes.append(base64.b16decode(hexpart.upper())) + return ''.join(packet_bytes) + + def Test(self, name, link_type): + """Yields a test using this packet, as a set of lines.""" + yield '// testPacket%s is the packet:' % name + for line in self.packet_lines: + yield '// ' + line + yield 'var testPacket%s = []byte{' % name + data = list(self.data) + while data: + linebytes, data = data[:16], data[16:] + yield ''.join(['\t'] + ['0x%02x, ' % ord(c) for c in linebytes]) + yield '}' + yield 'func TestPacket%s(t *testing.T) {' % name + yield '\tp := gopacket.NewPacket(testPacket%s, LinkType%s, gopacket.Default)' % (name, link_type) + yield '\tif p.ErrorLayer() != nil {' + yield '\t\tt.Error("Failed to decode packet:", p.ErrorLayer().Error())' + yield '\t}' + yield '\tcheckLayers(p, []gopacket.LayerType{LayerType%s, FILL_ME_IN_WITH_ACTUAL_LAYERS}, t)' % link_type + yield '}' + yield 'func BenchmarkDecodePacket%s(b *testing.B) {' % name + yield '\tfor i := 0; i < b.N; i++ {' + yield '\t\tgopacket.NewPacket(testPacket%s, LinkType%s, gopacket.NoCopy)' % (name, link_type) + yield '\t}' + yield '}' + + + +def GetTcpdumpOutput(filename): + """Runs tcpdump on the given file, returning output as string.""" + return subprocess.check_output( + ['tcpdump', '-XX', '-s', '0', '-n', '-r', filename]) + + +def TcpdumpOutputToPackets(output): + """Reads a pcap file with TCPDump, yielding Packet objects.""" + pdata = [] + for line in output.splitlines(): + if line[0] not in string.whitespace and pdata: + yield Packet(pdata) + pdata = [] + pdata.append(line) + if pdata: + yield Packet(pdata) + + +def main(): + class CustomHelpFormatter(argparse.ArgumentDefaultsHelpFormatter): + def _format_usage(self, usage, actions, groups, prefix=None): + header =('TestCreator creates gopacket tests using a pcap file.\n\n' + 'Tests are written to standard out... 
they can then be \n' + 'copied into the file of your choice and modified as \n' + 'you see.\n\n') + return header + argparse.ArgumentDefaultsHelpFormatter._format_usage( + self, usage, actions, groups, prefix) + + parser = argparse.ArgumentParser(formatter_class=CustomHelpFormatter) + parser.add_argument('--link_type', default='Ethernet', help='the link type (default: %(default)s)') + parser.add_argument('--name', default='Packet%d', help='the layer type, must have "%d" inside it') + parser.add_argument('files', metavar='file.pcap', type=str, nargs='+', help='the files to process') + + args = parser.parse_args() + + for arg in args.files: + for path in glob.glob(arg): + for i, packet in enumerate(TcpdumpOutputToPackets(GetTcpdumpOutput(path))): + print '\n'.join(packet.Test( + args.name % i, args.link_type)) + +if __name__ == '__main__': + main() diff --git a/vendor/github.com/google/gopacket/layers/tls.go b/vendor/github.com/google/gopacket/layers/tls.go new file mode 100644 index 0000000000..5a155d455a --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/tls.go @@ -0,0 +1,283 @@ +// Copyright 2018 The GoPacket Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "errors" + + "github.com/google/gopacket" +) + +// TLSType defines the type of data after the TLS Record +type TLSType uint8 + +// TLSType known values. +const ( + TLSChangeCipherSpec TLSType = 20 + TLSAlert TLSType = 21 + TLSHandshake TLSType = 22 + TLSApplicationData TLSType = 23 + TLSUnknown TLSType = 255 +) + +// String shows the register type nicely formatted +func (tt TLSType) String() string { + switch tt { + default: + return "Unknown" + case TLSChangeCipherSpec: + return "Change Cipher Spec" + case TLSAlert: + return "Alert" + case TLSHandshake: + return "Handshake" + case TLSApplicationData: + return "Application Data" + } +} + +// TLSVersion represents the TLS version in numeric format +type TLSVersion uint16 + +// Strings shows the TLS version nicely formatted +func (tv TLSVersion) String() string { + switch tv { + default: + return "Unknown" + case 0x0200: + return "SSL 2.0" + case 0x0300: + return "SSL 3.0" + case 0x0301: + return "TLS 1.0" + case 0x0302: + return "TLS 1.1" + case 0x0303: + return "TLS 1.2" + case 0x0304: + return "TLS 1.3" + } +} + +// TLS is specified in RFC 5246 +// +// TLS Record Protocol +// 0 1 2 3 4 5 6 7 8 +// +--+--+--+--+--+--+--+--+ +// | Content Type | +// +--+--+--+--+--+--+--+--+ +// | Version (major) | +// +--+--+--+--+--+--+--+--+ +// | Version (minor) | +// +--+--+--+--+--+--+--+--+ +// | Length | +// +--+--+--+--+--+--+--+--+ +// | Length | +// +--+--+--+--+--+--+--+--+ + +// TLS is actually a slide of TLSrecord structures +type TLS struct { + BaseLayer + + // TLS Records + ChangeCipherSpec []TLSChangeCipherSpecRecord + Handshake []TLSHandshakeRecord + AppData []TLSAppDataRecord + Alert []TLSAlertRecord +} + +// TLSRecordHeader contains all the information that each TLS Record types should have +type TLSRecordHeader struct { + ContentType TLSType + Version TLSVersion + Length uint16 +} + +// LayerType returns gopacket.LayerTypeTLS. +func (t *TLS) LayerType() gopacket.LayerType { return LayerTypeTLS } + +// decodeTLS decodes the byte slice into a TLS type. It also +// setups the application Layer in PacketBuilder. 
+func decodeTLS(data []byte, p gopacket.PacketBuilder) error { + t := &TLS{} + err := t.DecodeFromBytes(data, p) + if err != nil { + return err + } + p.AddLayer(t) + p.SetApplicationLayer(t) + return nil +} + +// DecodeFromBytes decodes the slice into the TLS struct. +func (t *TLS) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + t.BaseLayer.Contents = data + t.BaseLayer.Payload = nil + + t.ChangeCipherSpec = t.ChangeCipherSpec[:0] + t.Handshake = t.Handshake[:0] + t.AppData = t.AppData[:0] + t.Alert = t.Alert[:0] + + return t.decodeTLSRecords(data, df) +} + +func (t *TLS) decodeTLSRecords(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 5 { + df.SetTruncated() + return errors.New("TLS record too short") + } + + // since there are no further layers, the baselayer's content is + // pointing to this layer + // TODO: Consider removing this + t.BaseLayer = BaseLayer{Contents: data[:len(data)]} + + var h TLSRecordHeader + h.ContentType = TLSType(data[0]) + h.Version = TLSVersion(binary.BigEndian.Uint16(data[1:3])) + h.Length = binary.BigEndian.Uint16(data[3:5]) + + if h.ContentType.String() == "Unknown" { + return errors.New("Unknown TLS record type") + } + + hl := 5 // header length + tl := hl + int(h.Length) + if len(data) < tl { + df.SetTruncated() + return errors.New("TLS packet length mismatch") + } + + switch h.ContentType { + default: + return errors.New("Unknown TLS record type") + case TLSChangeCipherSpec: + var r TLSChangeCipherSpecRecord + e := r.decodeFromBytes(h, data[hl:tl], df) + if e != nil { + return e + } + t.ChangeCipherSpec = append(t.ChangeCipherSpec, r) + case TLSAlert: + var r TLSAlertRecord + e := r.decodeFromBytes(h, data[hl:tl], df) + if e != nil { + return e + } + t.Alert = append(t.Alert, r) + case TLSHandshake: + var r TLSHandshakeRecord + e := r.decodeFromBytes(h, data[hl:tl], df) + if e != nil { + return e + } + t.Handshake = append(t.Handshake, r) + case TLSApplicationData: + var r TLSAppDataRecord + e := r.decodeFromBytes(h, data[hl:tl], df) + if e != nil { + return e + } + t.AppData = append(t.AppData, r) + } + + if len(data) == tl { + return nil + } + return t.decodeTLSRecords(data[tl:len(data)], df) +} + +// CanDecode implements gopacket.DecodingLayer. +func (t *TLS) CanDecode() gopacket.LayerClass { + return LayerTypeTLS +} + +// NextLayerType implements gopacket.DecodingLayer. +func (t *TLS) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypeZero +} + +// Payload returns nil, since TLS encrypted payload is inside TLSAppDataRecord +func (t *TLS) Payload() []byte { + return nil +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. 
+func (t *TLS) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + totalLength := 0 + for _, record := range t.ChangeCipherSpec { + if opts.FixLengths { + record.Length = 1 + } + totalLength += 5 + 1 // length of header + record + } + for range t.Handshake { + totalLength += 5 + // TODO + } + for _, record := range t.AppData { + if opts.FixLengths { + record.Length = uint16(len(record.Payload)) + } + totalLength += 5 + len(record.Payload) + } + for _, record := range t.Alert { + if len(record.EncryptedMsg) == 0 { + if opts.FixLengths { + record.Length = 2 + } + totalLength += 5 + 2 + } else { + if opts.FixLengths { + record.Length = uint16(len(record.EncryptedMsg)) + } + totalLength += 5 + len(record.EncryptedMsg) + } + } + data, err := b.PrependBytes(totalLength) + if err != nil { + return err + } + off := 0 + for _, record := range t.ChangeCipherSpec { + off = encodeHeader(record.TLSRecordHeader, data, off) + data[off] = byte(record.Message) + off++ + } + for _, record := range t.Handshake { + off = encodeHeader(record.TLSRecordHeader, data, off) + // TODO + } + for _, record := range t.AppData { + off = encodeHeader(record.TLSRecordHeader, data, off) + copy(data[off:], record.Payload) + off += len(record.Payload) + } + for _, record := range t.Alert { + off = encodeHeader(record.TLSRecordHeader, data, off) + if len(record.EncryptedMsg) == 0 { + data[off] = byte(record.Level) + data[off+1] = byte(record.Description) + off += 2 + } else { + copy(data[off:], record.EncryptedMsg) + off += len(record.EncryptedMsg) + } + } + return nil +} + +func encodeHeader(header TLSRecordHeader, data []byte, offset int) int { + data[offset] = byte(header.ContentType) + binary.BigEndian.PutUint16(data[offset+1:], uint16(header.Version)) + binary.BigEndian.PutUint16(data[offset+3:], header.Length) + + return offset + 5 +} diff --git a/vendor/github.com/google/gopacket/layers/tls_alert.go b/vendor/github.com/google/gopacket/layers/tls_alert.go new file mode 100644 index 0000000000..0c5aee0218 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/tls_alert.go @@ -0,0 +1,165 @@ +// Copyright 2018 The GoPacket Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. 
+ +package layers + +import ( + "errors" + "fmt" + + "github.com/google/gopacket" +) + +// TLSAlertLevel defines the alert level data type +type TLSAlertLevel uint8 + +// TLSAlertDescr defines the alert descrption data type +type TLSAlertDescr uint8 + +const ( + TLSAlertWarning TLSAlertLevel = 1 + TLSAlertFatal TLSAlertLevel = 2 + TLSAlertUnknownLevel TLSAlertLevel = 255 + + TLSAlertCloseNotify TLSAlertDescr = 0 + TLSAlertUnexpectedMessage TLSAlertDescr = 10 + TLSAlertBadRecordMac TLSAlertDescr = 20 + TLSAlertDecryptionFailedRESERVED TLSAlertDescr = 21 + TLSAlertRecordOverflow TLSAlertDescr = 22 + TLSAlertDecompressionFailure TLSAlertDescr = 30 + TLSAlertHandshakeFailure TLSAlertDescr = 40 + TLSAlertNoCertificateRESERVED TLSAlertDescr = 41 + TLSAlertBadCertificate TLSAlertDescr = 42 + TLSAlertUnsupportedCertificate TLSAlertDescr = 43 + TLSAlertCertificateRevoked TLSAlertDescr = 44 + TLSAlertCertificateExpired TLSAlertDescr = 45 + TLSAlertCertificateUnknown TLSAlertDescr = 46 + TLSAlertIllegalParameter TLSAlertDescr = 47 + TLSAlertUnknownCa TLSAlertDescr = 48 + TLSAlertAccessDenied TLSAlertDescr = 49 + TLSAlertDecodeError TLSAlertDescr = 50 + TLSAlertDecryptError TLSAlertDescr = 51 + TLSAlertExportRestrictionRESERVED TLSAlertDescr = 60 + TLSAlertProtocolVersion TLSAlertDescr = 70 + TLSAlertInsufficientSecurity TLSAlertDescr = 71 + TLSAlertInternalError TLSAlertDescr = 80 + TLSAlertUserCanceled TLSAlertDescr = 90 + TLSAlertNoRenegotiation TLSAlertDescr = 100 + TLSAlertUnsupportedExtension TLSAlertDescr = 110 + TLSAlertUnknownDescription TLSAlertDescr = 255 +) + +// TLS Alert +// 0 1 2 3 4 5 6 7 8 +// +--+--+--+--+--+--+--+--+ +// | Level | +// +--+--+--+--+--+--+--+--+ +// | Description | +// +--+--+--+--+--+--+--+--+ + +// TLSAlertRecord contains all the information that each Alert Record type should have +type TLSAlertRecord struct { + TLSRecordHeader + + Level TLSAlertLevel + Description TLSAlertDescr + + EncryptedMsg []byte +} + +// DecodeFromBytes decodes the slice into the TLS struct. 
+func (t *TLSAlertRecord) decodeFromBytes(h TLSRecordHeader, data []byte, df gopacket.DecodeFeedback) error { + // TLS Record Header + t.ContentType = h.ContentType + t.Version = h.Version + t.Length = h.Length + + if len(data) < 2 { + df.SetTruncated() + return errors.New("TLS Alert packet too short") + } + + if t.Length == 2 { + t.Level = TLSAlertLevel(data[0]) + t.Description = TLSAlertDescr(data[1]) + } else { + t.Level = TLSAlertUnknownLevel + t.Description = TLSAlertUnknownDescription + t.EncryptedMsg = data + } + + return nil +} + +// Strings shows the TLS alert level nicely formatted +func (al TLSAlertLevel) String() string { + switch al { + default: + return fmt.Sprintf("Unknown(%d)", al) + case TLSAlertWarning: + return "Warning" + case TLSAlertFatal: + return "Fatal" + } +} + +// Strings shows the TLS alert description nicely formatted +func (ad TLSAlertDescr) String() string { + switch ad { + default: + return "Unknown" + case TLSAlertCloseNotify: + return "close_notify" + case TLSAlertUnexpectedMessage: + return "unexpected_message" + case TLSAlertBadRecordMac: + return "bad_record_mac" + case TLSAlertDecryptionFailedRESERVED: + return "decryption_failed_RESERVED" + case TLSAlertRecordOverflow: + return "record_overflow" + case TLSAlertDecompressionFailure: + return "decompression_failure" + case TLSAlertHandshakeFailure: + return "handshake_failure" + case TLSAlertNoCertificateRESERVED: + return "no_certificate_RESERVED" + case TLSAlertBadCertificate: + return "bad_certificate" + case TLSAlertUnsupportedCertificate: + return "unsupported_certificate" + case TLSAlertCertificateRevoked: + return "certificate_revoked" + case TLSAlertCertificateExpired: + return "certificate_expired" + case TLSAlertCertificateUnknown: + return "certificate_unknown" + case TLSAlertIllegalParameter: + return "illegal_parameter" + case TLSAlertUnknownCa: + return "unknown_ca" + case TLSAlertAccessDenied: + return "access_denied" + case TLSAlertDecodeError: + return "decode_error" + case TLSAlertDecryptError: + return "decrypt_error" + case TLSAlertExportRestrictionRESERVED: + return "export_restriction_RESERVED" + case TLSAlertProtocolVersion: + return "protocol_version" + case TLSAlertInsufficientSecurity: + return "insufficient_security" + case TLSAlertInternalError: + return "internal_error" + case TLSAlertUserCanceled: + return "user_canceled" + case TLSAlertNoRenegotiation: + return "no_renegotiation" + case TLSAlertUnsupportedExtension: + return "unsupported_extension" + } +} diff --git a/vendor/github.com/google/gopacket/layers/tls_appdata.go b/vendor/github.com/google/gopacket/layers/tls_appdata.go new file mode 100644 index 0000000000..dedd1d587b --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/tls_appdata.go @@ -0,0 +1,34 @@ +// Copyright 2018 The GoPacket Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "errors" + + "github.com/google/gopacket" +) + +// TLSAppDataRecord contains all the information that each AppData Record types should have +type TLSAppDataRecord struct { + TLSRecordHeader + Payload []byte +} + +// DecodeFromBytes decodes the slice into the TLS struct. 
+func (t *TLSAppDataRecord) decodeFromBytes(h TLSRecordHeader, data []byte, df gopacket.DecodeFeedback) error { + // TLS Record Header + t.ContentType = h.ContentType + t.Version = h.Version + t.Length = h.Length + + if len(data) != int(t.Length) { + return errors.New("TLS Application Data length mismatch") + } + + t.Payload = data + return nil +} diff --git a/vendor/github.com/google/gopacket/layers/tls_cipherspec.go b/vendor/github.com/google/gopacket/layers/tls_cipherspec.go new file mode 100644 index 0000000000..8f3dc62ba5 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/tls_cipherspec.go @@ -0,0 +1,64 @@ +// Copyright 2018 The GoPacket Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "errors" + + "github.com/google/gopacket" +) + +// TLSchangeCipherSpec defines the message value inside ChangeCipherSpec Record +type TLSchangeCipherSpec uint8 + +const ( + TLSChangecipherspecMessage TLSchangeCipherSpec = 1 + TLSChangecipherspecUnknown TLSchangeCipherSpec = 255 +) + +// TLS Change Cipher Spec +// 0 1 2 3 4 5 6 7 8 +// +--+--+--+--+--+--+--+--+ +// | Message | +// +--+--+--+--+--+--+--+--+ + +// TLSChangeCipherSpecRecord defines the type of data inside ChangeCipherSpec Record +type TLSChangeCipherSpecRecord struct { + TLSRecordHeader + + Message TLSchangeCipherSpec +} + +// DecodeFromBytes decodes the slice into the TLS struct. +func (t *TLSChangeCipherSpecRecord) decodeFromBytes(h TLSRecordHeader, data []byte, df gopacket.DecodeFeedback) error { + // TLS Record Header + t.ContentType = h.ContentType + t.Version = h.Version + t.Length = h.Length + + if len(data) != 1 { + df.SetTruncated() + return errors.New("TLS Change Cipher Spec record incorrect length") + } + + t.Message = TLSchangeCipherSpec(data[0]) + if t.Message != TLSChangecipherspecMessage { + t.Message = TLSChangecipherspecUnknown + } + + return nil +} + +// String shows the message value nicely formatted +func (ccs TLSchangeCipherSpec) String() string { + switch ccs { + default: + return "Unknown" + case TLSChangecipherspecMessage: + return "Change Cipher Spec Message" + } +} diff --git a/vendor/github.com/google/gopacket/layers/tls_handshake.go b/vendor/github.com/google/gopacket/layers/tls_handshake.go new file mode 100644 index 0000000000..e45e2c7cbc --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/tls_handshake.go @@ -0,0 +1,28 @@ +// Copyright 2018 The GoPacket Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "github.com/google/gopacket" +) + +// TLSHandshakeRecord defines the structure of a Handshare Record +type TLSHandshakeRecord struct { + TLSRecordHeader +} + +// DecodeFromBytes decodes the slice into the TLS struct. +func (t *TLSHandshakeRecord) decodeFromBytes(h TLSRecordHeader, data []byte, df gopacket.DecodeFeedback) error { + // TLS Record Header + t.ContentType = h.ContentType + t.Version = h.Version + t.Length = h.Length + + // TODO + + return nil +} diff --git a/vendor/github.com/google/gopacket/layers/udp.go b/vendor/github.com/google/gopacket/layers/udp.go new file mode 100644 index 0000000000..97e81c69fc --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/udp.go @@ -0,0 +1,133 @@ +// Copyright 2012 Google, Inc. All rights reserved. 
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "fmt" + + "github.com/google/gopacket" +) + +// UDP is the layer for UDP headers. +type UDP struct { + BaseLayer + SrcPort, DstPort UDPPort + Length uint16 + Checksum uint16 + sPort, dPort []byte + tcpipchecksum +} + +// LayerType returns gopacket.LayerTypeUDP +func (u *UDP) LayerType() gopacket.LayerType { return LayerTypeUDP } + +func (udp *UDP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 8 { + df.SetTruncated() + return fmt.Errorf("Invalid UDP header. Length %d less than 8", len(data)) + } + udp.SrcPort = UDPPort(binary.BigEndian.Uint16(data[0:2])) + udp.sPort = data[0:2] + udp.DstPort = UDPPort(binary.BigEndian.Uint16(data[2:4])) + udp.dPort = data[2:4] + udp.Length = binary.BigEndian.Uint16(data[4:6]) + udp.Checksum = binary.BigEndian.Uint16(data[6:8]) + udp.BaseLayer = BaseLayer{Contents: data[:8]} + switch { + case udp.Length >= 8: + hlen := int(udp.Length) + if hlen > len(data) { + df.SetTruncated() + hlen = len(data) + } + udp.Payload = data[8:hlen] + case udp.Length == 0: // Jumbogram, use entire rest of data + udp.Payload = data[8:] + default: + return fmt.Errorf("UDP packet too small: %d bytes", udp.Length) + } + return nil +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (u *UDP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + var jumbo bool + + payload := b.Bytes() + if _, ok := u.pseudoheader.(*IPv6); ok { + if len(payload)+8 > 65535 { + jumbo = true + } + } + bytes, err := b.PrependBytes(8) + if err != nil { + return err + } + binary.BigEndian.PutUint16(bytes, uint16(u.SrcPort)) + binary.BigEndian.PutUint16(bytes[2:], uint16(u.DstPort)) + if opts.FixLengths { + if jumbo { + u.Length = 0 + } else { + u.Length = uint16(len(payload)) + 8 + } + } + binary.BigEndian.PutUint16(bytes[4:], u.Length) + if opts.ComputeChecksums { + // zero out checksum bytes + bytes[6] = 0 + bytes[7] = 0 + csum, err := u.computeChecksum(b.Bytes(), IPProtocolUDP) + if err != nil { + return err + } + u.Checksum = csum + } + binary.BigEndian.PutUint16(bytes[6:], u.Checksum) + return nil +} + +func (u *UDP) CanDecode() gopacket.LayerClass { + return LayerTypeUDP +} + +// NextLayerType use the destination port to select the +// right next decoder. It tries first to decode via the +// destination port, then the source port. 
+func (u *UDP) NextLayerType() gopacket.LayerType { + if lt := u.DstPort.LayerType(); lt != gopacket.LayerTypePayload { + return lt + } + return u.SrcPort.LayerType() +} + +func decodeUDP(data []byte, p gopacket.PacketBuilder) error { + udp := &UDP{} + err := udp.DecodeFromBytes(data, p) + p.AddLayer(udp) + p.SetTransportLayer(udp) + if err != nil { + return err + } + return p.NextDecoder(udp.NextLayerType()) +} + +func (u *UDP) TransportFlow() gopacket.Flow { + return gopacket.NewFlow(EndpointUDPPort, u.sPort, u.dPort) +} + +// For testing only +func (u *UDP) SetInternalPortsForTesting() { + u.sPort = make([]byte, 2) + u.dPort = make([]byte, 2) + binary.BigEndian.PutUint16(u.sPort, uint16(u.SrcPort)) + binary.BigEndian.PutUint16(u.dPort, uint16(u.DstPort)) +} diff --git a/vendor/github.com/google/gopacket/layers/udplite.go b/vendor/github.com/google/gopacket/layers/udplite.go new file mode 100644 index 0000000000..7d84c51463 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/udplite.go @@ -0,0 +1,44 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// Copyright 2009-2011 Andreas Krennmair. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "github.com/google/gopacket" +) + +// UDPLite is the layer for UDP-Lite headers (rfc 3828). +type UDPLite struct { + BaseLayer + SrcPort, DstPort UDPLitePort + ChecksumCoverage uint16 + Checksum uint16 + sPort, dPort []byte +} + +// LayerType returns gopacket.LayerTypeUDPLite +func (u *UDPLite) LayerType() gopacket.LayerType { return LayerTypeUDPLite } + +func decodeUDPLite(data []byte, p gopacket.PacketBuilder) error { + udp := &UDPLite{ + SrcPort: UDPLitePort(binary.BigEndian.Uint16(data[0:2])), + sPort: data[0:2], + DstPort: UDPLitePort(binary.BigEndian.Uint16(data[2:4])), + dPort: data[2:4], + ChecksumCoverage: binary.BigEndian.Uint16(data[4:6]), + Checksum: binary.BigEndian.Uint16(data[6:8]), + BaseLayer: BaseLayer{data[:8], data[8:]}, + } + p.AddLayer(udp) + p.SetTransportLayer(udp) + return p.NextDecoder(gopacket.LayerTypePayload) +} + +func (u *UDPLite) TransportFlow() gopacket.Flow { + return gopacket.NewFlow(EndpointUDPLitePort, u.sPort, u.dPort) +} diff --git a/vendor/github.com/google/gopacket/layers/usb.go b/vendor/github.com/google/gopacket/layers/usb.go new file mode 100644 index 0000000000..d611e0fd06 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/usb.go @@ -0,0 +1,292 @@ +// Copyright 2014 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. 
+ +package layers + +import ( + "encoding/binary" + "errors" + "github.com/google/gopacket" +) + +type USBEventType uint8 + +const ( + USBEventTypeSubmit USBEventType = 'S' + USBEventTypeComplete USBEventType = 'C' + USBEventTypeError USBEventType = 'E' +) + +func (a USBEventType) String() string { + switch a { + case USBEventTypeSubmit: + return "SUBMIT" + case USBEventTypeComplete: + return "COMPLETE" + case USBEventTypeError: + return "ERROR" + default: + return "Unknown event type" + } +} + +type USBRequestBlockSetupRequest uint8 + +const ( + USBRequestBlockSetupRequestGetStatus USBRequestBlockSetupRequest = 0x00 + USBRequestBlockSetupRequestClearFeature USBRequestBlockSetupRequest = 0x01 + USBRequestBlockSetupRequestSetFeature USBRequestBlockSetupRequest = 0x03 + USBRequestBlockSetupRequestSetAddress USBRequestBlockSetupRequest = 0x05 + USBRequestBlockSetupRequestGetDescriptor USBRequestBlockSetupRequest = 0x06 + USBRequestBlockSetupRequestSetDescriptor USBRequestBlockSetupRequest = 0x07 + USBRequestBlockSetupRequestGetConfiguration USBRequestBlockSetupRequest = 0x08 + USBRequestBlockSetupRequestSetConfiguration USBRequestBlockSetupRequest = 0x09 + USBRequestBlockSetupRequestSetIdle USBRequestBlockSetupRequest = 0x0a +) + +func (a USBRequestBlockSetupRequest) String() string { + switch a { + case USBRequestBlockSetupRequestGetStatus: + return "GET_STATUS" + case USBRequestBlockSetupRequestClearFeature: + return "CLEAR_FEATURE" + case USBRequestBlockSetupRequestSetFeature: + return "SET_FEATURE" + case USBRequestBlockSetupRequestSetAddress: + return "SET_ADDRESS" + case USBRequestBlockSetupRequestGetDescriptor: + return "GET_DESCRIPTOR" + case USBRequestBlockSetupRequestSetDescriptor: + return "SET_DESCRIPTOR" + case USBRequestBlockSetupRequestGetConfiguration: + return "GET_CONFIGURATION" + case USBRequestBlockSetupRequestSetConfiguration: + return "SET_CONFIGURATION" + case USBRequestBlockSetupRequestSetIdle: + return "SET_IDLE" + default: + return "UNKNOWN" + } +} + +type USBTransportType uint8 + +const ( + USBTransportTypeTransferIn USBTransportType = 0x80 // Indicates send or receive + USBTransportTypeIsochronous USBTransportType = 0x00 // Isochronous transfers occur continuously and periodically. They typically contain time sensitive information, such as an audio or video stream. + USBTransportTypeInterrupt USBTransportType = 0x01 // Interrupt transfers are typically non-periodic, small device "initiated" communication requiring bounded latency, such as pointing devices or keyboards. + USBTransportTypeControl USBTransportType = 0x02 // Control transfers are typically used for command and status operations. + USBTransportTypeBulk USBTransportType = 0x03 // Bulk transfers can be used for large bursty data, using all remaining available bandwidth, no guarantees on bandwidth or latency, such as file transfers. +) + +type USBDirectionType uint8 + +const ( + USBDirectionTypeUnknown USBDirectionType = iota + USBDirectionTypeIn + USBDirectionTypeOut +) + +func (a USBDirectionType) String() string { + switch a { + case USBDirectionTypeIn: + return "In" + case USBDirectionTypeOut: + return "Out" + default: + return "Unknown direction type" + } +} + +// The reference at http://www.beyondlogic.org/usbnutshell/usb1.shtml contains more information about the protocol. 
+type USB struct { + BaseLayer + ID uint64 + EventType USBEventType + TransferType USBTransportType + Direction USBDirectionType + EndpointNumber uint8 + DeviceAddress uint8 + BusID uint16 + TimestampSec int64 + TimestampUsec int32 + Setup bool + Data bool + Status int32 + UrbLength uint32 + UrbDataLength uint32 + + UrbInterval uint32 + UrbStartFrame uint32 + UrbCopyOfTransferFlags uint32 + IsoNumDesc uint32 +} + +func (u *USB) LayerType() gopacket.LayerType { return LayerTypeUSB } + +func (m *USB) NextLayerType() gopacket.LayerType { + if m.Setup { + return LayerTypeUSBRequestBlockSetup + } else if m.Data { + } + + return m.TransferType.LayerType() +} + +func decodeUSB(data []byte, p gopacket.PacketBuilder) error { + d := &USB{} + + return decodingLayerDecoder(d, data, p) +} + +func (m *USB) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 40 { + df.SetTruncated() + return errors.New("USB < 40 bytes") + } + m.ID = binary.LittleEndian.Uint64(data[0:8]) + m.EventType = USBEventType(data[8]) + m.TransferType = USBTransportType(data[9]) + + m.EndpointNumber = data[10] & 0x7f + if data[10]&uint8(USBTransportTypeTransferIn) > 0 { + m.Direction = USBDirectionTypeIn + } else { + m.Direction = USBDirectionTypeOut + } + + m.DeviceAddress = data[11] + m.BusID = binary.LittleEndian.Uint16(data[12:14]) + + if uint(data[14]) == 0 { + m.Setup = true + } + + if uint(data[15]) == 0 { + m.Data = true + } + + m.TimestampSec = int64(binary.LittleEndian.Uint64(data[16:24])) + m.TimestampUsec = int32(binary.LittleEndian.Uint32(data[24:28])) + m.Status = int32(binary.LittleEndian.Uint32(data[28:32])) + m.UrbLength = binary.LittleEndian.Uint32(data[32:36]) + m.UrbDataLength = binary.LittleEndian.Uint32(data[36:40]) + + m.Contents = data[:40] + m.Payload = data[40:] + + if m.Setup { + m.Payload = data[40:] + } else if m.Data { + m.Payload = data[uint32(len(data))-m.UrbDataLength:] + } + + // if 64 bit, dissect_linux_usb_pseudo_header_ext + if false { + m.UrbInterval = binary.LittleEndian.Uint32(data[40:44]) + m.UrbStartFrame = binary.LittleEndian.Uint32(data[44:48]) + m.UrbDataLength = binary.LittleEndian.Uint32(data[48:52]) + m.IsoNumDesc = binary.LittleEndian.Uint32(data[52:56]) + m.Contents = data[:56] + m.Payload = data[56:] + } + + // crc5 or crc16 + // eop (end of packet) + + return nil +} + +type USBRequestBlockSetup struct { + BaseLayer + RequestType uint8 + Request USBRequestBlockSetupRequest + Value uint16 + Index uint16 + Length uint16 +} + +func (u *USBRequestBlockSetup) LayerType() gopacket.LayerType { return LayerTypeUSBRequestBlockSetup } + +func (m *USBRequestBlockSetup) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypePayload +} + +func (m *USBRequestBlockSetup) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + m.RequestType = data[0] + m.Request = USBRequestBlockSetupRequest(data[1]) + m.Value = binary.LittleEndian.Uint16(data[2:4]) + m.Index = binary.LittleEndian.Uint16(data[4:6]) + m.Length = binary.LittleEndian.Uint16(data[6:8]) + m.Contents = data[:8] + m.Payload = data[8:] + return nil +} + +func decodeUSBRequestBlockSetup(data []byte, p gopacket.PacketBuilder) error { + d := &USBRequestBlockSetup{} + return decodingLayerDecoder(d, data, p) +} + +type USBControl struct { + BaseLayer +} + +func (u *USBControl) LayerType() gopacket.LayerType { return LayerTypeUSBControl } + +func (m *USBControl) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypePayload +} + +func (m *USBControl) DecodeFromBytes(data []byte, df 
gopacket.DecodeFeedback) error { + m.Contents = data + return nil +} + +func decodeUSBControl(data []byte, p gopacket.PacketBuilder) error { + d := &USBControl{} + return decodingLayerDecoder(d, data, p) +} + +type USBInterrupt struct { + BaseLayer +} + +func (u *USBInterrupt) LayerType() gopacket.LayerType { return LayerTypeUSBInterrupt } + +func (m *USBInterrupt) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypePayload +} + +func (m *USBInterrupt) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + m.Contents = data + return nil +} + +func decodeUSBInterrupt(data []byte, p gopacket.PacketBuilder) error { + d := &USBInterrupt{} + return decodingLayerDecoder(d, data, p) +} + +type USBBulk struct { + BaseLayer +} + +func (u *USBBulk) LayerType() gopacket.LayerType { return LayerTypeUSBBulk } + +func (m *USBBulk) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypePayload +} + +func (m *USBBulk) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + m.Contents = data + return nil +} + +func decodeUSBBulk(data []byte, p gopacket.PacketBuilder) error { + d := &USBBulk{} + return decodingLayerDecoder(d, data, p) +} diff --git a/vendor/github.com/google/gopacket/layers/vrrp.go b/vendor/github.com/google/gopacket/layers/vrrp.go new file mode 100644 index 0000000000..ffaafe6a77 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/vrrp.go @@ -0,0 +1,156 @@ +// Copyright 2016 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "errors" + "net" + + "github.com/google/gopacket" +) + +/* + This layer provides decoding for Virtual Router Redundancy Protocol (VRRP) v2. + https://tools.ietf.org/html/rfc3768#section-5 + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |Version| Type | Virtual Rtr ID| Priority | Count IP Addrs| + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Auth Type | Adver Int | Checksum | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | IP Address (1) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | . | + | . | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | IP Address (n) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Authentication Data (1) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Authentication Data (2) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +*/ + +type VRRPv2Type uint8 +type VRRPv2AuthType uint8 + +const ( + VRRPv2Advertisement VRRPv2Type = 0x01 // router advertisement +) + +// String conversions for VRRP message types +func (v VRRPv2Type) String() string { + switch v { + case VRRPv2Advertisement: + return "VRRPv2 Advertisement" + default: + return "" + } +} + +const ( + VRRPv2AuthNoAuth VRRPv2AuthType = 0x00 // No Authentication + VRRPv2AuthReserved1 VRRPv2AuthType = 0x01 // Reserved field 1 + VRRPv2AuthReserved2 VRRPv2AuthType = 0x02 // Reserved field 2 +) + +func (v VRRPv2AuthType) String() string { + switch v { + case VRRPv2AuthNoAuth: + return "No Authentication" + case VRRPv2AuthReserved1: + return "Reserved" + case VRRPv2AuthReserved2: + return "Reserved" + default: + return "" + } +} + +// VRRPv2 represents an VRRP v2 message. 
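The VRRPv2 struct and DecodeFromBytes that follow pull each field straight out of the layout sketched above: version from the high nibble of byte 0, type from the low nibble, then router ID, priority, address count, auth type, advertisement interval, checksum and the advertised addresses. A hand-rolled sketch of that parse over a hypothetical one-address advertisement:

package main

import (
    "encoding/binary"
    "fmt"
    "net"
)

func main() {
    // Hypothetical VRRPv2 advertisement: 8-byte header plus one IPv4 address.
    pkt := []byte{
        0x21,       // version 2 (high nibble), type 1 = advertisement (low nibble)
        0x01,       // virtual router ID
        0x64,       // priority 100 (the default)
        0x01,       // count of IP addresses
        0x00,       // auth type: no authentication
        0x01,       // advertisement interval: 1 second
        0x00, 0x00, // checksum, left at zero in this sketch
        192, 0, 2, 1, // IP address (1)
    }
    version := pkt[0] >> 4
    typ := pkt[0] & 0x0F
    checksum := binary.BigEndian.Uint16(pkt[6:8])
    addr := net.IP(pkt[8:12])
    fmt.Println("version:", version, "type:", typ, "checksum:", checksum, "addr:", addr)
}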
+type VRRPv2 struct { + BaseLayer + Version uint8 // The version field specifies the VRRP protocol version of this packet (v2) + Type VRRPv2Type // The type field specifies the type of this VRRP packet. The only type defined in v2 is ADVERTISEMENT + VirtualRtrID uint8 // identifies the virtual router this packet is reporting status for + Priority uint8 // specifies the sending VRRP router's priority for the virtual router (100 = default) + CountIPAddr uint8 // The number of IP addresses contained in this VRRP advertisement. + AuthType VRRPv2AuthType // identifies the authentication method being utilized + AdverInt uint8 // The Advertisement interval indicates the time interval (in seconds) between ADVERTISEMENTS. The default is 1 second + Checksum uint16 // used to detect data corruption in the VRRP message. + IPAddress []net.IP // one or more IP addresses associated with the virtual router. Specified in the CountIPAddr field. +} + +// LayerType returns LayerTypeVRRP for VRRP v2 message. +func (v *VRRPv2) LayerType() gopacket.LayerType { return LayerTypeVRRP } + +func (v *VRRPv2) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + + v.BaseLayer = BaseLayer{Contents: data[:len(data)]} + v.Version = data[0] >> 4 // high nibble == VRRP version. We're expecting v2 + + v.Type = VRRPv2Type(data[0] & 0x0F) // low nibble == VRRP type. Expecting 1 (advertisement) + if v.Type != 1 { + // rfc3768: A packet with unknown type MUST be discarded. + return errors.New("Unrecognized VRRPv2 type field.") + } + + v.VirtualRtrID = data[1] + v.Priority = data[2] + + v.CountIPAddr = data[3] + if v.CountIPAddr < 1 { + return errors.New("VRRPv2 number of IP addresses is not valid.") + } + + v.AuthType = VRRPv2AuthType(data[4]) + v.AdverInt = uint8(data[5]) + v.Checksum = binary.BigEndian.Uint16(data[6:8]) + + // populate the IPAddress field. The number of addresses is specified in the v.CountIPAddr field + // offset references the starting byte containing the list of ip addresses + offset := 8 + for i := uint8(0); i < v.CountIPAddr; i++ { + v.IPAddress = append(v.IPAddress, data[offset:offset+4]) + offset += 4 + } + + // any trailing packets here may be authentication data and *should* be ignored in v2 as per RFC + // + // 5.3.10. Authentication Data + // + // The authentication string is currently only used to maintain + // backwards compatibility with RFC 2338. It SHOULD be set to zero on + // transmission and ignored on reception. + return nil +} + +// CanDecode specifies the layer type in which we are attempting to unwrap. +func (v *VRRPv2) CanDecode() gopacket.LayerClass { + return LayerTypeVRRP +} + +// NextLayerType specifies the next layer that should be decoded. VRRP does not contain any further payload, so we set to 0 +func (v *VRRPv2) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypeZero +} + +// The VRRP packet does not include payload data. Setting byte slice to nil +func (v *VRRPv2) Payload() []byte { + return nil +} + +// decodeVRRP will parse VRRP v2 +func decodeVRRP(data []byte, p gopacket.PacketBuilder) error { + if len(data) < 8 { + return errors.New("Not a valid VRRP packet. Packet length is too small.") + } + v := &VRRPv2{} + return decodingLayerDecoder(v, data, p) +} diff --git a/vendor/github.com/google/gopacket/layers/vxlan.go b/vendor/github.com/google/gopacket/layers/vxlan.go new file mode 100644 index 0000000000..e479cd81f3 --- /dev/null +++ b/vendor/github.com/google/gopacket/layers/vxlan.go @@ -0,0 +1,123 @@ +// Copyright 2016 Google, Inc. 
All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "errors" + "fmt" + + "github.com/google/gopacket" +) + +// VXLAN is specifed in RFC 7348 https://tools.ietf.org/html/rfc7348 +// G, D, A, Group Policy ID from https://tools.ietf.org/html/draft-smith-vxlan-group-policy-00 +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// 0 8 16 24 32 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// |G|R|R|R|I|R|R|R|R|D|R|R|A|R|R|R| Group Policy ID | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | 24 bit VXLAN Network Identifier | Reserved | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +// VXLAN is a VXLAN packet header +type VXLAN struct { + BaseLayer + ValidIDFlag bool // 'I' bit per RFC 7348 + VNI uint32 // 'VXLAN Network Identifier' 24 bits per RFC 7348 + GBPExtension bool // 'G' bit per Group Policy https://tools.ietf.org/html/draft-smith-vxlan-group-policy-00 + GBPDontLearn bool // 'D' bit per Group Policy + GBPApplied bool // 'A' bit per Group Policy + GBPGroupPolicyID uint16 // 'Group Policy ID' 16 bits per Group Policy +} + +// LayerType returns LayerTypeVXLAN +func (vx *VXLAN) LayerType() gopacket.LayerType { return LayerTypeVXLAN } + +// CanDecode returns the layer type this DecodingLayer can decode +func (vx *VXLAN) CanDecode() gopacket.LayerClass { + return LayerTypeVXLAN +} + +// NextLayerType retuns the next layer we should see after vxlan +func (vx *VXLAN) NextLayerType() gopacket.LayerType { + return LayerTypeEthernet +} + +// DecodeFromBytes takes a byte buffer and decodes +func (vx *VXLAN) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 8 { + return errors.New("vxlan packet too small") + } + // VNI is a 24bit number, Uint32 requires 32 bits + var buf [4]byte + copy(buf[1:], data[4:7]) + + // RFC 7348 https://tools.ietf.org/html/rfc7348 + vx.ValidIDFlag = data[0]&0x08 > 0 // 'I' bit per RFC7348 + vx.VNI = binary.BigEndian.Uint32(buf[:]) // VXLAN Network Identifier per RFC7348 + + // Group Based Policy https://tools.ietf.org/html/draft-smith-vxlan-group-policy-00 + vx.GBPExtension = data[0]&0x80 > 0 // 'G' bit per the group policy draft + vx.GBPDontLearn = data[1]&0x40 > 0 // 'D' bit - the egress VTEP MUST NOT learn the source address of the encapsulated frame. + vx.GBPApplied = data[1]&0x80 > 0 // 'A' bit - indicates that the group policy has already been applied to this packet. + vx.GBPGroupPolicyID = binary.BigEndian.Uint16(data[2:4]) // Policy ID as per the group policy draft + + // Layer information + const vxlanLength = 8 + vx.Contents = data[:vxlanLength] + vx.Payload = data[vxlanLength:] + + return nil + +} + +func decodeVXLAN(data []byte, p gopacket.PacketBuilder) error { + vx := &VXLAN{} + err := vx.DecodeFromBytes(data, p) + if err != nil { + return err + } + + p.AddLayer(vx) + return p.NextDecoder(LinkTypeEthernet) +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (vx *VXLAN) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + bytes, err := b.PrependBytes(8) + if err != nil { + return err + } + + // PrependBytes does not guarantee that bytes are zeroed. 
Setting flags via OR requires that they start off at zero + bytes[0] = 0 + bytes[1] = 0 + + if vx.ValidIDFlag { + bytes[0] |= 0x08 + } + if vx.GBPExtension { + bytes[0] |= 0x80 + } + if vx.GBPDontLearn { + bytes[1] |= 0x40 + } + if vx.GBPApplied { + bytes[1] |= 0x80 + } + + binary.BigEndian.PutUint16(bytes[2:4], vx.GBPGroupPolicyID) + if vx.VNI >= 1<<24 { + return fmt.Errorf("Virtual Network Identifier = %x exceeds max for 24-bit uint", vx.VNI) + } + binary.BigEndian.PutUint32(bytes[4:8], vx.VNI<<8) + return nil +} diff --git a/vendor/github.com/google/gopacket/layers_decoder.go b/vendor/github.com/google/gopacket/layers_decoder.go new file mode 100644 index 0000000000..8c1f108cfc --- /dev/null +++ b/vendor/github.com/google/gopacket/layers_decoder.go @@ -0,0 +1,101 @@ +// Copyright 2019 The GoPacket Authors. All rights reserved. + +package gopacket + +// Created by gen.go, don't edit manually +// Generated at 2019-06-18 11:37:31.308731293 +0600 +06 m=+0.000842599 + +// LayersDecoder returns DecodingLayerFunc for specified +// DecodingLayerContainer, LayerType value to start decoding with and +// some DecodeFeedback. +func LayersDecoder(dl DecodingLayerContainer, first LayerType, df DecodeFeedback) DecodingLayerFunc { + firstDec, ok := dl.Decoder(first) + if !ok { + return func([]byte, *[]LayerType) (LayerType, error) { + return first, nil + } + } + if dlc, ok := dl.(DecodingLayerSparse); ok { + return func(data []byte, decoded *[]LayerType) (LayerType, error) { + *decoded = (*decoded)[:0] // Truncated decoded layers. + typ := first + decoder := firstDec + for { + if err := decoder.DecodeFromBytes(data, df); err != nil { + return LayerTypeZero, err + } + *decoded = append(*decoded, typ) + typ = decoder.NextLayerType() + if data = decoder.LayerPayload(); len(data) == 0 { + break + } + if decoder, ok = dlc.Decoder(typ); !ok { + return typ, nil + } + } + return LayerTypeZero, nil + } + } + if dlc, ok := dl.(DecodingLayerArray); ok { + return func(data []byte, decoded *[]LayerType) (LayerType, error) { + *decoded = (*decoded)[:0] // Truncated decoded layers. + typ := first + decoder := firstDec + for { + if err := decoder.DecodeFromBytes(data, df); err != nil { + return LayerTypeZero, err + } + *decoded = append(*decoded, typ) + typ = decoder.NextLayerType() + if data = decoder.LayerPayload(); len(data) == 0 { + break + } + if decoder, ok = dlc.Decoder(typ); !ok { + return typ, nil + } + } + return LayerTypeZero, nil + } + } + if dlc, ok := dl.(DecodingLayerMap); ok { + return func(data []byte, decoded *[]LayerType) (LayerType, error) { + *decoded = (*decoded)[:0] // Truncated decoded layers. + typ := first + decoder := firstDec + for { + if err := decoder.DecodeFromBytes(data, df); err != nil { + return LayerTypeZero, err + } + *decoded = append(*decoded, typ) + typ = decoder.NextLayerType() + if data = decoder.LayerPayload(); len(data) == 0 { + break + } + if decoder, ok = dlc.Decoder(typ); !ok { + return typ, nil + } + } + return LayerTypeZero, nil + } + } + dlc := dl + return func(data []byte, decoded *[]LayerType) (LayerType, error) { + *decoded = (*decoded)[:0] // Truncated decoded layers. 
+ typ := first + decoder := firstDec + for { + if err := decoder.DecodeFromBytes(data, df); err != nil { + return LayerTypeZero, err + } + *decoded = append(*decoded, typ) + typ = decoder.NextLayerType() + if data = decoder.LayerPayload(); len(data) == 0 { + break + } + if decoder, ok = dlc.Decoder(typ); !ok { + return typ, nil + } + } + return LayerTypeZero, nil + } +} diff --git a/vendor/github.com/google/gopacket/layertype.go b/vendor/github.com/google/gopacket/layertype.go new file mode 100644 index 0000000000..3abfee1e9b --- /dev/null +++ b/vendor/github.com/google/gopacket/layertype.go @@ -0,0 +1,111 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package gopacket + +import ( + "fmt" + "strconv" +) + +// LayerType is a unique identifier for each type of layer. This enumeration +// does not match with any externally available numbering scheme... it's solely +// usable/useful within this library as a means for requesting layer types +// (see Packet.Layer) and determining which types of layers have been decoded. +// +// New LayerTypes may be created by calling gopacket.RegisterLayerType. +type LayerType int64 + +// LayerTypeMetadata contains metadata associated with each LayerType. +type LayerTypeMetadata struct { + // Name is the string returned by each layer type's String method. + Name string + // Decoder is the decoder to use when the layer type is passed in as a + // Decoder. + Decoder Decoder +} + +type layerTypeMetadata struct { + inUse bool + LayerTypeMetadata +} + +// DecodersByLayerName maps layer names to decoders for those layers. +// This allows users to specify decoders by name to a program and have that +// program pick the correct decoder accordingly. +var DecodersByLayerName = map[string]Decoder{} + +const maxLayerType = 2000 + +var ltMeta [maxLayerType]layerTypeMetadata +var ltMetaMap = map[LayerType]layerTypeMetadata{} + +// RegisterLayerType creates a new layer type and registers it globally. +// The number passed in must be unique, or a runtime panic will occur. Numbers +// 0-999 are reserved for the gopacket library. Numbers 1000-1999 should be +// used for common application-specific types, and are very fast. Any other +// number (negative or >= 2000) may be used for uncommon application-specific +// types, and are somewhat slower (they require a map lookup over an array +// index). +func RegisterLayerType(num int, meta LayerTypeMetadata) LayerType { + if 0 <= num && num < maxLayerType { + if ltMeta[num].inUse { + panic("Layer type already exists") + } + } else { + if ltMetaMap[LayerType(num)].inUse { + panic("Layer type already exists") + } + } + return OverrideLayerType(num, meta) +} + +// OverrideLayerType acts like RegisterLayerType, except that if the layer type +// has already been registered, it overrides the metadata with the passed-in +// metadata intead of panicing. +func OverrideLayerType(num int, meta LayerTypeMetadata) LayerType { + if 0 <= num && num < maxLayerType { + ltMeta[num] = layerTypeMetadata{ + inUse: true, + LayerTypeMetadata: meta, + } + } else { + ltMetaMap[LayerType(num)] = layerTypeMetadata{ + inUse: true, + LayerTypeMetadata: meta, + } + } + DecodersByLayerName[meta.Name] = meta.Decoder + return LayerType(num) +} + +// Decode decodes the given data using the decoder registered with the layer +// type. 
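RegisterLayerType and OverrideLayerType above are the hook for applications that want gopacket to understand protocols it does not ship with; numbers in the 1000-1999 range stay on the fast array lookup. A sketch of registering a hypothetical layer type (the name, number and decoder here are purely illustrative):

package main

import (
    "fmt"

    "github.com/google/gopacket"
)

// decodeMyProto is a stand-in decoder for a made-up protocol: it simply
// hands the whole buffer to the generic Payload layer.
func decodeMyProto(data []byte, p gopacket.PacketBuilder) error {
    p.AddLayer(gopacket.Payload(data))
    return nil
}

// An application-range number (1000-1999) keeps lookups on the fast array path.
var LayerTypeMyProto = gopacket.RegisterLayerType(1001, gopacket.LayerTypeMetadata{
    Name:    "MyProto",
    Decoder: gopacket.DecodeFunc(decodeMyProto),
})

func main() {
    fmt.Println(LayerTypeMyProto) // String() resolves to the registered name
}

Handing everything to gopacket.Payload keeps the sketch self-contained; a real decoder would populate its own Layer type and usually call p.NextDecoder for the remaining bytes.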
+func (t LayerType) Decode(data []byte, c PacketBuilder) error { + var d Decoder + if 0 <= int(t) && int(t) < maxLayerType { + d = ltMeta[int(t)].Decoder + } else { + d = ltMetaMap[t].Decoder + } + if d != nil { + return d.Decode(data, c) + } + return fmt.Errorf("Layer type %v has no associated decoder", t) +} + +// String returns the string associated with this layer type. +func (t LayerType) String() (s string) { + if 0 <= int(t) && int(t) < maxLayerType { + s = ltMeta[int(t)].Name + } else { + s = ltMetaMap[t].Name + } + if s == "" { + s = strconv.Itoa(int(t)) + } + return +} diff --git a/vendor/github.com/google/gopacket/packet.go b/vendor/github.com/google/gopacket/packet.go new file mode 100644 index 0000000000..3a7c4b3d80 --- /dev/null +++ b/vendor/github.com/google/gopacket/packet.go @@ -0,0 +1,864 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package gopacket + +import ( + "bytes" + "encoding/hex" + "errors" + "fmt" + "io" + "net" + "os" + "reflect" + "runtime/debug" + "strings" + "syscall" + "time" +) + +// CaptureInfo provides standardized information about a packet captured off +// the wire or read from a file. +type CaptureInfo struct { + // Timestamp is the time the packet was captured, if that is known. + Timestamp time.Time + // CaptureLength is the total number of bytes read off of the wire. + CaptureLength int + // Length is the size of the original packet. Should always be >= + // CaptureLength. + Length int + // InterfaceIndex + InterfaceIndex int + // The packet source can place ancillary data of various types here. + // For example, the afpacket source can report the VLAN of captured + // packets this way. + AncillaryData []interface{} +} + +// PacketMetadata contains metadata for a packet. +type PacketMetadata struct { + CaptureInfo + // Truncated is true if packet decoding logic detects that there are fewer + // bytes in the packet than are detailed in various headers (for example, if + // the number of bytes in the IPv4 contents/payload is less than IPv4.Length). + // This is also set automatically for packets captured off the wire if + // CaptureInfo.CaptureLength < CaptureInfo.Length. + Truncated bool +} + +// Packet is the primary object used by gopacket. Packets are created by a +// Decoder's Decode call. A packet is made up of a set of Data, which +// is broken into a number of Layers as it is decoded. +type Packet interface { + //// Functions for outputting the packet as a human-readable string: + //// ------------------------------------------------------------------ + // String returns a human-readable string representation of the packet. + // It uses LayerString on each layer to output the layer. + String() string + // Dump returns a verbose human-readable string representation of the packet, + // including a hex dump of all layers. It uses LayerDump on each layer to + // output the layer. + Dump() string + + //// Functions for accessing arbitrary packet layers: + //// ------------------------------------------------------------------ + // Layers returns all layers in this packet, computing them as necessary + Layers() []Layer + // Layer returns the first layer in this packet of the given type, or nil + Layer(LayerType) Layer + // LayerClass returns the first layer in this packet of the given class, + // or nil. 
+ LayerClass(LayerClass) Layer + + //// Functions for accessing specific types of packet layers. These functions + //// return the first layer of each type found within the packet. + //// ------------------------------------------------------------------ + // LinkLayer returns the first link layer in the packet + LinkLayer() LinkLayer + // NetworkLayer returns the first network layer in the packet + NetworkLayer() NetworkLayer + // TransportLayer returns the first transport layer in the packet + TransportLayer() TransportLayer + // ApplicationLayer returns the first application layer in the packet + ApplicationLayer() ApplicationLayer + // ErrorLayer is particularly useful, since it returns nil if the packet + // was fully decoded successfully, and non-nil if an error was encountered + // in decoding and the packet was only partially decoded. Thus, its output + // can be used to determine if the entire packet was able to be decoded. + ErrorLayer() ErrorLayer + + //// Functions for accessing data specific to the packet: + //// ------------------------------------------------------------------ + // Data returns the set of bytes that make up this entire packet. + Data() []byte + // Metadata returns packet metadata associated with this packet. + Metadata() *PacketMetadata +} + +// packet contains all the information we need to fulfill the Packet interface, +// and its two "subclasses" (yes, no such thing in Go, bear with me), +// eagerPacket and lazyPacket, provide eager and lazy decoding logic around the +// various functions needed to access this information. +type packet struct { + // data contains the entire packet data for a packet + data []byte + // initialLayers is space for an initial set of layers already created inside + // the packet. + initialLayers [6]Layer + // layers contains each layer we've already decoded + layers []Layer + // last is the last layer added to the packet + last Layer + // metadata is the PacketMetadata for this packet + metadata PacketMetadata + + decodeOptions DecodeOptions + + // Pointers to the various important layers + link LinkLayer + network NetworkLayer + transport TransportLayer + application ApplicationLayer + failure ErrorLayer +} + +func (p *packet) SetTruncated() { + p.metadata.Truncated = true +} + +func (p *packet) SetLinkLayer(l LinkLayer) { + if p.link == nil { + p.link = l + } +} + +func (p *packet) SetNetworkLayer(l NetworkLayer) { + if p.network == nil { + p.network = l + } +} + +func (p *packet) SetTransportLayer(l TransportLayer) { + if p.transport == nil { + p.transport = l + } +} + +func (p *packet) SetApplicationLayer(l ApplicationLayer) { + if p.application == nil { + p.application = l + } +} + +func (p *packet) SetErrorLayer(l ErrorLayer) { + if p.failure == nil { + p.failure = l + } +} + +func (p *packet) AddLayer(l Layer) { + p.layers = append(p.layers, l) + p.last = l +} + +func (p *packet) DumpPacketData() { + fmt.Fprint(os.Stderr, p.packetDump()) + os.Stderr.Sync() +} + +func (p *packet) Metadata() *PacketMetadata { + return &p.metadata +} + +func (p *packet) Data() []byte { + return p.data +} + +func (p *packet) DecodeOptions() *DecodeOptions { + return &p.decodeOptions +} + +func (p *packet) addFinalDecodeError(err error, stack []byte) { + fail := &DecodeFailure{err: err, stack: stack} + if p.last == nil { + fail.data = p.data + } else { + fail.data = p.last.LayerPayload() + } + p.AddLayer(fail) + p.SetErrorLayer(fail) +} + +func (p *packet) recoverDecodeError() { + if !p.decodeOptions.SkipDecodeRecovery { + if r := recover(); 
r != nil { + p.addFinalDecodeError(fmt.Errorf("%v", r), debug.Stack()) + } + } +} + +// LayerString outputs an individual layer as a string. The layer is output +// in a single line, with no trailing newline. This function is specifically +// designed to do the right thing for most layers... it follows the following +// rules: +// * If the Layer has a String function, just output that. +// * Otherwise, output all exported fields in the layer, recursing into +// exported slices and structs. +// NOTE: This is NOT THE SAME AS fmt's "%#v". %#v will output both exported +// and unexported fields... many times packet layers contain unexported stuff +// that would just mess up the output of the layer, see for example the +// Payload layer and it's internal 'data' field, which contains a large byte +// array that would really mess up formatting. +func LayerString(l Layer) string { + return fmt.Sprintf("%v\t%s", l.LayerType(), layerString(reflect.ValueOf(l), false, false)) +} + +// Dumper dumps verbose information on a value. If a layer type implements +// Dumper, then its LayerDump() string will include the results in its output. +type Dumper interface { + Dump() string +} + +// LayerDump outputs a very verbose string representation of a layer. Its +// output is a concatenation of LayerString(l) and hex.Dump(l.LayerContents()). +// It contains newlines and ends with a newline. +func LayerDump(l Layer) string { + var b bytes.Buffer + b.WriteString(LayerString(l)) + b.WriteByte('\n') + if d, ok := l.(Dumper); ok { + dump := d.Dump() + if dump != "" { + b.WriteString(dump) + if dump[len(dump)-1] != '\n' { + b.WriteByte('\n') + } + } + } + b.WriteString(hex.Dump(l.LayerContents())) + return b.String() +} + +// layerString outputs, recursively, a layer in a "smart" way. See docs for +// LayerString for more details. +// +// Params: +// i - value to write out +// anonymous: if we're currently recursing an anonymous member of a struct +// writeSpace: if we've already written a value in a struct, and need to +// write a space before writing more. This happens when we write various +// anonymous values, and need to keep writing more. +func layerString(v reflect.Value, anonymous bool, writeSpace bool) string { + // Let String() functions take precedence. + if v.CanInterface() { + if s, ok := v.Interface().(fmt.Stringer); ok { + return s.String() + } + } + // Reflect, and spit out all the exported fields as key=value. + switch v.Type().Kind() { + case reflect.Interface, reflect.Ptr: + if v.IsNil() { + return "nil" + } + r := v.Elem() + return layerString(r, anonymous, writeSpace) + case reflect.Struct: + var b bytes.Buffer + typ := v.Type() + if !anonymous { + b.WriteByte('{') + } + for i := 0; i < v.NumField(); i++ { + // Check if this is upper-case. 
+ ftype := typ.Field(i) + f := v.Field(i) + if ftype.Anonymous { + anonStr := layerString(f, true, writeSpace) + writeSpace = writeSpace || anonStr != "" + b.WriteString(anonStr) + } else if ftype.PkgPath == "" { // exported + if writeSpace { + b.WriteByte(' ') + } + writeSpace = true + fmt.Fprintf(&b, "%s=%s", typ.Field(i).Name, layerString(f, false, writeSpace)) + } + } + if !anonymous { + b.WriteByte('}') + } + return b.String() + case reflect.Slice: + var b bytes.Buffer + b.WriteByte('[') + if v.Len() > 4 { + fmt.Fprintf(&b, "..%d..", v.Len()) + } else { + for j := 0; j < v.Len(); j++ { + if j != 0 { + b.WriteString(", ") + } + b.WriteString(layerString(v.Index(j), false, false)) + } + } + b.WriteByte(']') + return b.String() + } + return fmt.Sprintf("%v", v.Interface()) +} + +const ( + longBytesLength = 128 +) + +// LongBytesGoString returns a string representation of the byte slice shortened +// using the format '{ ... ( bytes)}' if it +// exceeds a predetermined length. Can be used to avoid filling the display with +// very long byte strings. +func LongBytesGoString(buf []byte) string { + if len(buf) < longBytesLength { + return fmt.Sprintf("%#v", buf) + } + s := fmt.Sprintf("%#v", buf[:longBytesLength-1]) + s = strings.TrimSuffix(s, "}") + return fmt.Sprintf("%s ... (%d bytes)}", s, len(buf)) +} + +func baseLayerString(value reflect.Value) string { + t := value.Type() + content := value.Field(0) + c := make([]byte, content.Len()) + for i := range c { + c[i] = byte(content.Index(i).Uint()) + } + payload := value.Field(1) + p := make([]byte, payload.Len()) + for i := range p { + p[i] = byte(payload.Index(i).Uint()) + } + return fmt.Sprintf("%s{Contents:%s, Payload:%s}", t.String(), + LongBytesGoString(c), + LongBytesGoString(p)) +} + +func layerGoString(i interface{}, b *bytes.Buffer) { + if s, ok := i.(fmt.GoStringer); ok { + b.WriteString(s.GoString()) + return + } + + var v reflect.Value + var ok bool + if v, ok = i.(reflect.Value); !ok { + v = reflect.ValueOf(i) + } + switch v.Kind() { + case reflect.Ptr, reflect.Interface: + if v.Kind() == reflect.Ptr { + b.WriteByte('&') + } + layerGoString(v.Elem().Interface(), b) + case reflect.Struct: + t := v.Type() + b.WriteString(t.String()) + b.WriteByte('{') + for i := 0; i < v.NumField(); i++ { + if i > 0 { + b.WriteString(", ") + } + if t.Field(i).Name == "BaseLayer" { + fmt.Fprintf(b, "BaseLayer:%s", baseLayerString(v.Field(i))) + } else if v.Field(i).Kind() == reflect.Struct { + fmt.Fprintf(b, "%s:", t.Field(i).Name) + layerGoString(v.Field(i), b) + } else if v.Field(i).Kind() == reflect.Ptr { + b.WriteByte('&') + layerGoString(v.Field(i), b) + } else { + fmt.Fprintf(b, "%s:%#v", t.Field(i).Name, v.Field(i)) + } + } + b.WriteByte('}') + default: + fmt.Fprintf(b, "%#v", i) + } +} + +// LayerGoString returns a representation of the layer in Go syntax, +// taking care to shorten "very long" BaseLayer byte slices +func LayerGoString(l Layer) string { + b := new(bytes.Buffer) + layerGoString(l, b) + return b.String() +} + +func (p *packet) packetString() string { + var b bytes.Buffer + fmt.Fprintf(&b, "PACKET: %d bytes", len(p.Data())) + if p.metadata.Truncated { + b.WriteString(", truncated") + } + if p.metadata.Length > 0 { + fmt.Fprintf(&b, ", wire length %d cap length %d", p.metadata.Length, p.metadata.CaptureLength) + } + if !p.metadata.Timestamp.IsZero() { + fmt.Fprintf(&b, " @ %v", p.metadata.Timestamp) + } + b.WriteByte('\n') + for i, l := range p.layers { + fmt.Fprintf(&b, "- Layer %d (%02d bytes) = %s\n", i+1, 
len(l.LayerContents()), LayerString(l)) + } + return b.String() +} + +func (p *packet) packetDump() string { + var b bytes.Buffer + fmt.Fprintf(&b, "-- FULL PACKET DATA (%d bytes) ------------------------------------\n%s", len(p.data), hex.Dump(p.data)) + for i, l := range p.layers { + fmt.Fprintf(&b, "--- Layer %d ---\n%s", i+1, LayerDump(l)) + } + return b.String() +} + +// eagerPacket is a packet implementation that does eager decoding. Upon +// initial construction, it decodes all the layers it can from packet data. +// eagerPacket implements Packet and PacketBuilder. +type eagerPacket struct { + packet +} + +var errNilDecoder = errors.New("NextDecoder passed nil decoder, probably an unsupported decode type") + +func (p *eagerPacket) NextDecoder(next Decoder) error { + if next == nil { + return errNilDecoder + } + if p.last == nil { + return errors.New("NextDecoder called, but no layers added yet") + } + d := p.last.LayerPayload() + if len(d) == 0 { + return nil + } + // Since we're eager, immediately call the next decoder. + return next.Decode(d, p) +} +func (p *eagerPacket) initialDecode(dec Decoder) { + defer p.recoverDecodeError() + err := dec.Decode(p.data, p) + if err != nil { + p.addFinalDecodeError(err, nil) + } +} +func (p *eagerPacket) LinkLayer() LinkLayer { + return p.link +} +func (p *eagerPacket) NetworkLayer() NetworkLayer { + return p.network +} +func (p *eagerPacket) TransportLayer() TransportLayer { + return p.transport +} +func (p *eagerPacket) ApplicationLayer() ApplicationLayer { + return p.application +} +func (p *eagerPacket) ErrorLayer() ErrorLayer { + return p.failure +} +func (p *eagerPacket) Layers() []Layer { + return p.layers +} +func (p *eagerPacket) Layer(t LayerType) Layer { + for _, l := range p.layers { + if l.LayerType() == t { + return l + } + } + return nil +} +func (p *eagerPacket) LayerClass(lc LayerClass) Layer { + for _, l := range p.layers { + if lc.Contains(l.LayerType()) { + return l + } + } + return nil +} +func (p *eagerPacket) String() string { return p.packetString() } +func (p *eagerPacket) Dump() string { return p.packetDump() } + +// lazyPacket does lazy decoding on its packet data. On construction it does +// no initial decoding. For each function call, it decodes only as many layers +// as are necessary to compute the return value for that function. +// lazyPacket implements Packet and PacketBuilder. +type lazyPacket struct { + packet + next Decoder +} + +func (p *lazyPacket) NextDecoder(next Decoder) error { + if next == nil { + return errNilDecoder + } + p.next = next + return nil +} +func (p *lazyPacket) decodeNextLayer() { + if p.next == nil { + return + } + d := p.data + if p.last != nil { + d = p.last.LayerPayload() + } + next := p.next + p.next = nil + // We've just set p.next to nil, so if we see we have no data, this should be + // the final call we get to decodeNextLayer if we return here. 
+ if len(d) == 0 { + return + } + defer p.recoverDecodeError() + err := next.Decode(d, p) + if err != nil { + p.addFinalDecodeError(err, nil) + } +} +func (p *lazyPacket) LinkLayer() LinkLayer { + for p.link == nil && p.next != nil { + p.decodeNextLayer() + } + return p.link +} +func (p *lazyPacket) NetworkLayer() NetworkLayer { + for p.network == nil && p.next != nil { + p.decodeNextLayer() + } + return p.network +} +func (p *lazyPacket) TransportLayer() TransportLayer { + for p.transport == nil && p.next != nil { + p.decodeNextLayer() + } + return p.transport +} +func (p *lazyPacket) ApplicationLayer() ApplicationLayer { + for p.application == nil && p.next != nil { + p.decodeNextLayer() + } + return p.application +} +func (p *lazyPacket) ErrorLayer() ErrorLayer { + for p.failure == nil && p.next != nil { + p.decodeNextLayer() + } + return p.failure +} +func (p *lazyPacket) Layers() []Layer { + for p.next != nil { + p.decodeNextLayer() + } + return p.layers +} +func (p *lazyPacket) Layer(t LayerType) Layer { + for _, l := range p.layers { + if l.LayerType() == t { + return l + } + } + numLayers := len(p.layers) + for p.next != nil { + p.decodeNextLayer() + for _, l := range p.layers[numLayers:] { + if l.LayerType() == t { + return l + } + } + numLayers = len(p.layers) + } + return nil +} +func (p *lazyPacket) LayerClass(lc LayerClass) Layer { + for _, l := range p.layers { + if lc.Contains(l.LayerType()) { + return l + } + } + numLayers := len(p.layers) + for p.next != nil { + p.decodeNextLayer() + for _, l := range p.layers[numLayers:] { + if lc.Contains(l.LayerType()) { + return l + } + } + numLayers = len(p.layers) + } + return nil +} +func (p *lazyPacket) String() string { p.Layers(); return p.packetString() } +func (p *lazyPacket) Dump() string { p.Layers(); return p.packetDump() } + +// DecodeOptions tells gopacket how to decode a packet. +type DecodeOptions struct { + // Lazy decoding decodes the minimum number of layers needed to return data + // for a packet at each function call. Be careful using this with concurrent + // packet processors, as each call to packet.* could mutate the packet, and + // two concurrent function calls could interact poorly. + Lazy bool + // NoCopy decoding doesn't copy its input buffer into storage that's owned by + // the packet. If you can guarantee that the bytes underlying the slice + // passed into NewPacket aren't going to be modified, this can be faster. If + // there's any chance that those bytes WILL be changed, this will invalidate + // your packets. + NoCopy bool + // SkipDecodeRecovery skips over panic recovery during packet decoding. + // Normally, when packets decode, if a panic occurs, that panic is captured + // by a recover(), and a DecodeFailure layer is added to the packet detailing + // the issue. If this flag is set, panics are instead allowed to continue up + // the stack. + SkipDecodeRecovery bool + // DecodeStreamsAsDatagrams enables routing of application-level layers in the TCP + // decoder. If true, we should try to decode layers after TCP in single packets. + // This is disabled by default because the reassembly package drives the decoding + // of TCP payload data after reassembly. + DecodeStreamsAsDatagrams bool +} + +// Default decoding provides the safest (but slowest) method for decoding +// packets. It eagerly processes all layers (so it's concurrency-safe) and it +// copies its input buffer upon creation of the packet (so the packet remains +// valid if the underlying slice is modified. 
Both of these take time, +// though, so beware. If you can guarantee that the packet will only be used +// by one goroutine at a time, set Lazy decoding. If you can guarantee that +// the underlying slice won't change, set NoCopy decoding. +var Default = DecodeOptions{} + +// Lazy is a DecodeOptions with just Lazy set. +var Lazy = DecodeOptions{Lazy: true} + +// NoCopy is a DecodeOptions with just NoCopy set. +var NoCopy = DecodeOptions{NoCopy: true} + +// DecodeStreamsAsDatagrams is a DecodeOptions with just DecodeStreamsAsDatagrams set. +var DecodeStreamsAsDatagrams = DecodeOptions{DecodeStreamsAsDatagrams: true} + +// NewPacket creates a new Packet object from a set of bytes. The +// firstLayerDecoder tells it how to interpret the first layer from the bytes, +// future layers will be generated from that first layer automatically. +func NewPacket(data []byte, firstLayerDecoder Decoder, options DecodeOptions) Packet { + if !options.NoCopy { + dataCopy := make([]byte, len(data)) + copy(dataCopy, data) + data = dataCopy + } + if options.Lazy { + p := &lazyPacket{ + packet: packet{data: data, decodeOptions: options}, + next: firstLayerDecoder, + } + p.layers = p.initialLayers[:0] + // Crazy craziness: + // If the following return statemet is REMOVED, and Lazy is FALSE, then + // eager packet processing becomes 17% FASTER. No, there is no logical + // explanation for this. However, it's such a hacky micro-optimization that + // we really can't rely on it. It appears to have to do with the size the + // compiler guesses for this function's stack space, since one symptom is + // that with the return statement in place, we more than double calls to + // runtime.morestack/runtime.lessstack. We'll hope the compiler gets better + // over time and we get this optimization for free. Until then, we'll have + // to live with slower packet processing. + return p + } + p := &eagerPacket{ + packet: packet{data: data, decodeOptions: options}, + } + p.layers = p.initialLayers[:0] + p.initialDecode(firstLayerDecoder) + return p +} + +// PacketDataSource is an interface for some source of packet data. Users may +// create their own implementations, or use the existing implementations in +// gopacket/pcap (libpcap, allows reading from live interfaces or from +// pcap files) or gopacket/pfring (PF_RING, allows reading from live +// interfaces). +type PacketDataSource interface { + // ReadPacketData returns the next packet available from this data source. + // It returns: + // data: The bytes of an individual packet. + // ci: Metadata about the capture + // err: An error encountered while reading packet data. If err != nil, + // then data/ci will be ignored. + ReadPacketData() (data []byte, ci CaptureInfo, err error) +} + +// ConcatFinitePacketDataSources returns a PacketDataSource that wraps a set +// of internal PacketDataSources, each of which will stop with io.EOF after +// reading a finite number of packets. The returned PacketDataSource will +// return all packets from the first finite source, followed by all packets from +// the second, etc. Once all finite sources have returned io.EOF, the returned +// source will as well. 
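NewPacket together with the Default/Lazy/NoCopy options above is the usual one-shot decoding entry point. A short sketch assuming the vendored layers package and some raw Ethernet frame bytes (the input here is hypothetical):

package main

import (
    "fmt"

    "github.com/google/gopacket"
    "github.com/google/gopacket/layers"
)

func main() {
    var data []byte // raw Ethernet frame bytes would go here (hypothetical input)

    // Lazy: layers are only decoded when something asks for them.
    pkt := gopacket.NewPacket(data, layers.LayerTypeEthernet, gopacket.Lazy)

    if l := pkt.Layer(layers.LayerTypeTCP); l != nil {
        tcp := l.(*layers.TCP)
        fmt.Println("TCP ports:", tcp.SrcPort, tcp.DstPort)
    }
    if errLayer := pkt.ErrorLayer(); errLayer != nil {
        fmt.Println("decode error:", errLayer.Error())
    }
}

As the DecodeOptions comments above spell out, Lazy trades concurrency safety for speed, so it should only be used when a packet stays on a single goroutine.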
+func ConcatFinitePacketDataSources(pds ...PacketDataSource) PacketDataSource { + c := concat(pds) + return &c +} + +type concat []PacketDataSource + +func (c *concat) ReadPacketData() (data []byte, ci CaptureInfo, err error) { + for len(*c) > 0 { + data, ci, err = (*c)[0].ReadPacketData() + if err == io.EOF { + *c = (*c)[1:] + continue + } + return + } + return nil, CaptureInfo{}, io.EOF +} + +// ZeroCopyPacketDataSource is an interface to pull packet data from sources +// that allow data to be returned without copying to a user-controlled buffer. +// It's very similar to PacketDataSource, except that the caller must be more +// careful in how the returned buffer is handled. +type ZeroCopyPacketDataSource interface { + // ZeroCopyReadPacketData returns the next packet available from this data source. + // It returns: + // data: The bytes of an individual packet. Unlike with + // PacketDataSource's ReadPacketData, the slice returned here points + // to a buffer owned by the data source. In particular, the bytes in + // this buffer may be changed by future calls to + // ZeroCopyReadPacketData. Do not use the returned buffer after + // subsequent ZeroCopyReadPacketData calls. + // ci: Metadata about the capture + // err: An error encountered while reading packet data. If err != nil, + // then data/ci will be ignored. + ZeroCopyReadPacketData() (data []byte, ci CaptureInfo, err error) +} + +// PacketSource reads in packets from a PacketDataSource, decodes them, and +// returns them. +// +// There are currently two different methods for reading packets in through +// a PacketSource: +// +// Reading With Packets Function +// +// This method is the most convenient and easiest to code, but lacks +// flexibility. Packets returns a 'chan Packet', then asynchronously writes +// packets into that channel. Packets uses a blocking channel, and closes +// it if an io.EOF is returned by the underlying PacketDataSource. All other +// PacketDataSource errors are ignored and discarded. +// for packet := range packetSource.Packets() { +// ... +// } +// +// Reading With NextPacket Function +// +// This method is the most flexible, and exposes errors that may be +// encountered by the underlying PacketDataSource. It's also the fastest +// in a tight loop, since it doesn't have the overhead of a channel +// read/write. However, it requires the user to handle errors, most +// importantly the io.EOF error in cases where packets are being read from +// a file. +// for { +// packet, err := packetSource.NextPacket() +// if err == io.EOF { +// break +// } else if err != nil { +// log.Println("Error:", err) +// continue +// } +// handlePacket(packet) // Do something with each packet. +// } +type PacketSource struct { + source PacketDataSource + decoder Decoder + // DecodeOptions is the set of options to use for decoding each piece + // of packet data. This can/should be changed by the user to reflect the + // way packets should be decoded. + DecodeOptions + c chan Packet +} + +// NewPacketSource creates a packet data source. +func NewPacketSource(source PacketDataSource, decoder Decoder) *PacketSource { + return &PacketSource{ + source: source, + decoder: decoder, + } +} + +// NextPacket returns the next decoded packet from the PacketSource. On error, +// it returns a nil packet and a non-nil error. 
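NextPacket and Packets below are normally fed by one of the capture front-ends mentioned in the PacketDataSource comment. A hedged sketch using gopacket/pcap, which is not part of this vendored tree, to drain a capture file (the file name is a placeholder):

package main

import (
    "fmt"
    "log"

    "github.com/google/gopacket"
    "github.com/google/gopacket/pcap" // not vendored in this change; shown for illustration
)

func main() {
    handle, err := pcap.OpenOffline("capture.pcap") // placeholder file name
    if err != nil {
        log.Fatal(err)
    }
    defer handle.Close()

    src := gopacket.NewPacketSource(handle, handle.LinkType())
    for pkt := range src.Packets() { // channel closes once the source returns io.EOF
        fmt.Println(pkt)
    }
}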
+func (p *PacketSource) NextPacket() (Packet, error) { + data, ci, err := p.source.ReadPacketData() + if err != nil { + return nil, err + } + packet := NewPacket(data, p.decoder, p.DecodeOptions) + m := packet.Metadata() + m.CaptureInfo = ci + m.Truncated = m.Truncated || ci.CaptureLength < ci.Length + return packet, nil +} + +// packetsToChannel reads in all packets from the packet source and sends them +// to the given channel. This routine terminates when a non-temporary error +// is returned by NextPacket(). +func (p *PacketSource) packetsToChannel() { + defer close(p.c) + for { + packet, err := p.NextPacket() + if err == nil { + p.c <- packet + continue + } + + // Immediately retry for temporary network errors + if nerr, ok := err.(net.Error); ok && nerr.Temporary() { + continue + } + + // Immediately retry for EAGAIN + if err == syscall.EAGAIN { + continue + } + + // Immediately break for known unrecoverable errors + if err == io.EOF || err == io.ErrUnexpectedEOF || + err == io.ErrNoProgress || err == io.ErrClosedPipe || err == io.ErrShortBuffer || + err == syscall.EBADF || + strings.Contains(err.Error(), "use of closed file") { + break + } + + // Sleep briefly and try again + time.Sleep(time.Millisecond * time.Duration(5)) + } +} + +// Packets returns a channel of packets, allowing easy iterating over +// packets. Packets will be asynchronously read in from the underlying +// PacketDataSource and written to the returned channel. If the underlying +// PacketDataSource returns an io.EOF error, the channel will be closed. +// If any other error is encountered, it is ignored. +// +// for packet := range packetSource.Packets() { +// handlePacket(packet) // Do something with each packet. +// } +// +// If called more than once, returns the same channel. +func (p *PacketSource) Packets() chan Packet { + if p.c == nil { + p.c = make(chan Packet, 1000) + go p.packetsToChannel() + } + return p.c +} diff --git a/vendor/github.com/google/gopacket/parser.go b/vendor/github.com/google/gopacket/parser.go new file mode 100644 index 0000000000..4a4676f1cf --- /dev/null +++ b/vendor/github.com/google/gopacket/parser.go @@ -0,0 +1,350 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package gopacket + +import ( + "fmt" +) + +// A container for single LayerType->DecodingLayer mapping. +type decodingLayerElem struct { + typ LayerType + dec DecodingLayer +} + +// DecodingLayer is an interface for packet layers that can decode themselves. +// +// The important part of DecodingLayer is that they decode themselves in-place. +// Calling DecodeFromBytes on a DecodingLayer totally resets the entire layer to +// the new state defined by the data passed in. A returned error leaves the +// DecodingLayer in an unknown intermediate state, thus its fields should not be +// trusted. +// +// Because the DecodingLayer is resetting its own fields, a call to +// DecodeFromBytes should normally not require any memory allocation. +type DecodingLayer interface { + // DecodeFromBytes resets the internal state of this layer to the state + // defined by the passed-in bytes. Slices in the DecodingLayer may + // reference the passed-in data, so care should be taken to copy it + // first should later modification of data be required before the + // DecodingLayer is discarded. 
+ DecodeFromBytes(data []byte, df DecodeFeedback) error + // CanDecode returns the set of LayerTypes this DecodingLayer can + // decode. For Layers that are also DecodingLayers, this will most + // often be that Layer's LayerType(). + CanDecode() LayerClass + // NextLayerType returns the LayerType which should be used to decode + // the LayerPayload. + NextLayerType() LayerType + // LayerPayload is the set of bytes remaining to decode after a call to + // DecodeFromBytes. + LayerPayload() []byte +} + +// DecodingLayerFunc decodes given packet and stores decoded LayerType +// values into specified slice. Returns either first encountered +// unsupported LayerType value or decoding error. In case of success, +// returns (LayerTypeZero, nil). +type DecodingLayerFunc func([]byte, *[]LayerType) (LayerType, error) + +// DecodingLayerContainer stores all DecodingLayer-s and serves as a +// searching tool for DecodingLayerParser. +type DecodingLayerContainer interface { + // Put adds new DecodingLayer to container. The new instance of + // the same DecodingLayerContainer is returned so it may be + // implemented as a value receiver. + Put(DecodingLayer) DecodingLayerContainer + // Decoder returns DecodingLayer to decode given LayerType and + // true if it was found. If no decoder found, return false. + Decoder(LayerType) (DecodingLayer, bool) + // LayersDecoder returns DecodingLayerFunc which decodes given + // packet, starting with specified LayerType and DecodeFeedback. + LayersDecoder(first LayerType, df DecodeFeedback) DecodingLayerFunc +} + +// DecodingLayerSparse is a sparse array-based implementation of +// DecodingLayerContainer. Each DecodingLayer is addressed in an +// allocated slice by LayerType value itself. Though this is the +// fastest container it may be memory-consuming if used with big +// LayerType values. +type DecodingLayerSparse []DecodingLayer + +// Put implements DecodingLayerContainer interface. +func (dl DecodingLayerSparse) Put(d DecodingLayer) DecodingLayerContainer { + maxLayerType := LayerType(len(dl) - 1) + for _, typ := range d.CanDecode().LayerTypes() { + if typ > maxLayerType { + maxLayerType = typ + } + } + + if extra := maxLayerType - LayerType(len(dl)) + 1; extra > 0 { + dl = append(dl, make([]DecodingLayer, extra)...) + } + + for _, typ := range d.CanDecode().LayerTypes() { + dl[typ] = d + } + return dl +} + +// LayersDecoder implements DecodingLayerContainer interface. +func (dl DecodingLayerSparse) LayersDecoder(first LayerType, df DecodeFeedback) DecodingLayerFunc { + return LayersDecoder(dl, first, df) +} + +// Decoder implements DecodingLayerContainer interface. +func (dl DecodingLayerSparse) Decoder(typ LayerType) (DecodingLayer, bool) { + if int64(typ) < int64(len(dl)) { + decoder := dl[typ] + return decoder, decoder != nil + } + return nil, false +} + +// DecodingLayerArray is an array-based implementation of +// DecodingLayerContainer. Each DecodingLayer is searched linearly in +// an allocated slice in one-by-one fashion. +type DecodingLayerArray []decodingLayerElem + +// Put implements DecodingLayerContainer interface. +func (dl DecodingLayerArray) Put(d DecodingLayer) DecodingLayerContainer { +TYPES: + for _, typ := range d.CanDecode().LayerTypes() { + for i := range dl { + if dl[i].typ == typ { + dl[i].dec = d + continue TYPES + } + } + dl = append(dl, decodingLayerElem{typ, d}) + } + return dl +} + +// Decoder implements DecodingLayerContainer interface. 
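The three container flavours trade memory for lookup cost: the sparse array indexes decoders directly by LayerType value, the array scans linearly, and the map hashes. A sketch that swaps the sparse container into a DecodingLayerParser (the parser API appears further down in parser.go); the input bytes are hypothetical:

package main

import (
    "fmt"

    "github.com/google/gopacket"
    "github.com/google/gopacket/layers"
)

func main() {
    var eth layers.Ethernet
    var ip4 layers.IPv4

    // The default container is DecodingLayerMap; swap in the sparse array
    // first, then register the decoding layers against it.
    parser := gopacket.NewDecodingLayerParser(layers.LayerTypeEthernet)
    parser.SetDecodingLayerContainer(gopacket.DecodingLayerSparse(nil))
    parser.AddDecodingLayer(&eth)
    parser.AddDecodingLayer(&ip4)

    var frame []byte // raw frame bytes (hypothetical input)
    decoded := []gopacket.LayerType{}
    err := parser.DecodeLayers(frame, &decoded)
    fmt.Println(decoded, err)
}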
+func (dl DecodingLayerArray) Decoder(typ LayerType) (DecodingLayer, bool) { + for i := range dl { + if dl[i].typ == typ { + return dl[i].dec, true + } + } + return nil, false +} + +// LayersDecoder implements DecodingLayerContainer interface. +func (dl DecodingLayerArray) LayersDecoder(first LayerType, df DecodeFeedback) DecodingLayerFunc { + return LayersDecoder(dl, first, df) +} + +// DecodingLayerMap is an map-based implementation of +// DecodingLayerContainer. Each DecodingLayer is searched in a map +// hashed by LayerType value. +type DecodingLayerMap map[LayerType]DecodingLayer + +// Put implements DecodingLayerContainer interface. +func (dl DecodingLayerMap) Put(d DecodingLayer) DecodingLayerContainer { + for _, typ := range d.CanDecode().LayerTypes() { + if dl == nil { + dl = make(map[LayerType]DecodingLayer) + } + dl[typ] = d + } + return dl +} + +// Decoder implements DecodingLayerContainer interface. +func (dl DecodingLayerMap) Decoder(typ LayerType) (DecodingLayer, bool) { + d, ok := dl[typ] + return d, ok +} + +// LayersDecoder implements DecodingLayerContainer interface. +func (dl DecodingLayerMap) LayersDecoder(first LayerType, df DecodeFeedback) DecodingLayerFunc { + return LayersDecoder(dl, first, df) +} + +// Static code check. +var ( + _ = []DecodingLayerContainer{ + DecodingLayerSparse(nil), + DecodingLayerMap(nil), + DecodingLayerArray(nil), + } +) + +// DecodingLayerParser parses a given set of layer types. See DecodeLayers for +// more information on how DecodingLayerParser should be used. +type DecodingLayerParser struct { + // DecodingLayerParserOptions is the set of options available to the + // user to define the parser's behavior. + DecodingLayerParserOptions + dlc DecodingLayerContainer + first LayerType + df DecodeFeedback + + decodeFunc DecodingLayerFunc + + // Truncated is set when a decode layer detects that the packet has been + // truncated. + Truncated bool +} + +// AddDecodingLayer adds a decoding layer to the parser. This adds support for +// the decoding layer's CanDecode layers to the parser... should they be +// encountered, they'll be parsed. +func (l *DecodingLayerParser) AddDecodingLayer(d DecodingLayer) { + l.SetDecodingLayerContainer(l.dlc.Put(d)) +} + +// SetTruncated is used by DecodingLayers to set the Truncated boolean in the +// DecodingLayerParser. Users should simply read Truncated after calling +// DecodeLayers. +func (l *DecodingLayerParser) SetTruncated() { + l.Truncated = true +} + +// NewDecodingLayerParser creates a new DecodingLayerParser and adds in all +// of the given DecodingLayers with AddDecodingLayer. +// +// Each call to DecodeLayers will attempt to decode the given bytes first by +// treating them as a 'first'-type layer, then by using NextLayerType on +// subsequently decoded layers to find the next relevant decoder. Should a +// deoder not be available for the layer type returned by NextLayerType, +// decoding will stop. +// +// NewDecodingLayerParser uses DecodingLayerMap container by +// default. +func NewDecodingLayerParser(first LayerType, decoders ...DecodingLayer) *DecodingLayerParser { + dlp := &DecodingLayerParser{first: first} + dlp.df = dlp // Cast this once to the interface + // default container + dlc := DecodingLayerContainer(DecodingLayerMap(make(map[LayerType]DecodingLayer))) + for _, d := range decoders { + dlc = dlc.Put(d) + } + + dlp.SetDecodingLayerContainer(dlc) + return dlp +} + +// SetDecodingLayerContainer specifies container with decoders. 
This +// call replaces all decoders already registered in given instance of +// DecodingLayerParser. +func (l *DecodingLayerParser) SetDecodingLayerContainer(dlc DecodingLayerContainer) { + l.dlc = dlc + l.decodeFunc = l.dlc.LayersDecoder(l.first, l.df) +} + +// DecodeLayers decodes as many layers as possible from the given data. It +// initially treats the data as layer type 'typ', then uses NextLayerType on +// each subsequent decoded layer until it gets to a layer type it doesn't know +// how to parse. +// +// For each layer successfully decoded, DecodeLayers appends the layer type to +// the decoded slice. DecodeLayers truncates the 'decoded' slice initially, so +// there's no need to empty it yourself. +// +// This decoding method is about an order of magnitude faster than packet +// decoding, because it only decodes known layers that have already been +// allocated. This means it doesn't need to allocate each layer it returns... +// instead it overwrites the layers that already exist. +// +// Example usage: +// func main() { +// var eth layers.Ethernet +// var ip4 layers.IPv4 +// var ip6 layers.IPv6 +// var tcp layers.TCP +// var udp layers.UDP +// var payload gopacket.Payload +// parser := gopacket.NewDecodingLayerParser(layers.LayerTypeEthernet, &eth, &ip4, &ip6, &tcp, &udp, &payload) +// var source gopacket.PacketDataSource = getMyDataSource() +// decodedLayers := make([]gopacket.LayerType, 0, 10) +// for { +// data, _, err := source.ReadPacketData() +// if err != nil { +// fmt.Println("Error reading packet data: ", err) +// continue +// } +// fmt.Println("Decoding packet") +// err = parser.DecodeLayers(data, &decodedLayers) +// for _, typ := range decodedLayers { +// fmt.Println(" Successfully decoded layer type", typ) +// switch typ { +// case layers.LayerTypeEthernet: +// fmt.Println(" Eth ", eth.SrcMAC, eth.DstMAC) +// case layers.LayerTypeIPv4: +// fmt.Println(" IP4 ", ip4.SrcIP, ip4.DstIP) +// case layers.LayerTypeIPv6: +// fmt.Println(" IP6 ", ip6.SrcIP, ip6.DstIP) +// case layers.LayerTypeTCP: +// fmt.Println(" TCP ", tcp.SrcPort, tcp.DstPort) +// case layers.LayerTypeUDP: +// fmt.Println(" UDP ", udp.SrcPort, udp.DstPort) +// } +// } +// if parser.Truncated { +// fmt.Println(" Packet has been truncated") +// } +// if err != nil { +// fmt.Println(" Error encountered:", err) +// } +// } +// } +// +// If DecodeLayers is unable to decode the next layer type, it will return the +// error UnsupportedLayerType. +func (l *DecodingLayerParser) DecodeLayers(data []byte, decoded *[]LayerType) (err error) { + l.Truncated = false + if !l.IgnorePanic { + defer panicToError(&err) + } + typ, err := l.decodeFunc(data, decoded) + if typ != LayerTypeZero { + // no decoder + if l.IgnoreUnsupported { + return nil + } + return UnsupportedLayerType(typ) + } + return err +} + +// UnsupportedLayerType is returned by DecodingLayerParser if DecodeLayers +// encounters a layer type that the DecodingLayerParser has no decoder for. +type UnsupportedLayerType LayerType + +// Error implements the error interface, returning a string to say that the +// given layer type is unsupported. +func (e UnsupportedLayerType) Error() string { + return fmt.Sprintf("No decoder for layer type %v", LayerType(e)) +} + +func panicToError(e *error) { + if r := recover(); r != nil { + *e = fmt.Errorf("panic: %v", r) + } +} + +// DecodingLayerParserOptions provides options to affect the behavior of a given +// DecodingLayerParser.
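// Editor's note: the sketch below is an illustrative addition, not part of the
// upstream gopacket source. It shows how the container behind a
// DecodingLayerParser can be swapped via SetDecodingLayerContainer, here using
// DecodingLayerSparse for constant-time lookups. packetData and decodedLayers
// are assumed to come from the surrounding program (see the DecodeLayers
// example above).
//
//	var eth layers.Ethernet
//	var ip4 layers.IPv4
//	var tcp layers.TCP
//	var payload gopacket.Payload
//	parser := gopacket.NewDecodingLayerParser(layers.LayerTypeEthernet, &eth, &ip4, &tcp, &payload)
//
//	// Put returns the (possibly re-allocated) container, so feed the final
//	// result back into the parser once all decoders are registered.
//	dlc := gopacket.DecodingLayerContainer(gopacket.DecodingLayerSparse(nil))
//	for _, d := range []gopacket.DecodingLayer{&eth, &ip4, &tcp, &payload} {
//		dlc = dlc.Put(d)
//	}
//	parser.SetDecodingLayerContainer(dlc)
//
//	err := parser.DecodeLayers(packetData, &decodedLayers)
//	if _, unsupported := err.(gopacket.UnsupportedLayerType); unsupported {
//		// decoding stopped at the first layer type with no registered decoder
//	}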
+type DecodingLayerParserOptions struct { + // IgnorePanic determines whether a DecodingLayerParser should stop + // panics on its own (by returning them as an error from DecodeLayers) + // or should allow them to raise up the stack. Handling errors does add + // latency to the process of decoding layers, but is much safer for + // callers. IgnorePanic defaults to false, thus if the caller does + // nothing decode panics will be returned as errors. + IgnorePanic bool + // IgnoreUnsupported will stop parsing and return a nil error when it + // encounters a layer it doesn't have a parser for, instead of returning an + // UnsupportedLayerType error. If this is true, it's up to the caller to make + // sure that all expected layers have been parsed (by checking the decoded + // slice). + IgnoreUnsupported bool +} diff --git a/vendor/github.com/google/gopacket/time.go b/vendor/github.com/google/gopacket/time.go new file mode 100644 index 0000000000..6d116cdfbc --- /dev/null +++ b/vendor/github.com/google/gopacket/time.go @@ -0,0 +1,72 @@ +// Copyright 2018 The GoPacket Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package gopacket + +import ( + "fmt" + "math" + "time" +) + +// TimestampResolution represents the resolution of timestamps in Base^Exponent. +type TimestampResolution struct { + Base, Exponent int +} + +func (t TimestampResolution) String() string { + return fmt.Sprintf("%d^%d", t.Base, t.Exponent) +} + +// ToDuration returns the smallest representable time difference as a time.Duration +func (t TimestampResolution) ToDuration() time.Duration { + if t.Base == 0 { + return 0 + } + if t.Exponent == 0 { + return time.Second + } + switch t.Base { + case 10: + return time.Duration(math.Pow10(t.Exponent + 9)) + case 2: + if t.Exponent < 0 { + return time.Second >> uint(-t.Exponent) + } + return time.Second << uint(t.Exponent) + default: + // this might lose precision + return time.Duration(float64(time.Second) * math.Pow(float64(t.Base), float64(t.Exponent))) + } +} + +// TimestampResolutionInvalid represents an invalid timestamp resolution +var TimestampResolutionInvalid = TimestampResolution{} + +// TimestampResolutionMillisecond is a resolution of 10^-3s +var TimestampResolutionMillisecond = TimestampResolution{10, -3} + +// TimestampResolutionMicrosecond is a resolution of 10^-6s +var TimestampResolutionMicrosecond = TimestampResolution{10, -6} + +// TimestampResolutionNanosecond is a resolution of 10^-9s +var TimestampResolutionNanosecond = TimestampResolution{10, -9} + +// TimestampResolutionNTP is the resolution of NTP timestamps which is 2^-32 ≈ 233 picoseconds +var TimestampResolutionNTP = TimestampResolution{2, -32} + +// TimestampResolutionCaptureInfo is the resolution used in CaptureInfo, which is currently nanosecond +var TimestampResolutionCaptureInfo = TimestampResolutionNanosecond + +// PacketSourceResolution is an interface for packet data sources that +// support reporting the timestamp resolution of the acquired timestamps. +// Returned timestamps will always have TimestampResolutionNanosecond due +// to the use of time.Time, but scaling might have occurred if acquired +// timestamps have a different resolution. +type PacketSourceResolution interface { + // Resolution returns the timestamp resolution of acquired timestamps before scaling to TimestampResolutionNanosecond.
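// (Editorial aside, not part of the upstream source: the pre-scaling
// resolution reported here can be turned into a time.Duration with
// ToDuration, for example
//
//	gopacket.TimestampResolutionMicrosecond.ToDuration() // time.Microsecond
//	gopacket.TimestampResolutionNTP.ToDuration()         // 0; 2^-32 s is finer than time.Duration's 1ns unit
//
// so callers can tell whether acquired timestamps were coarser or finer than
// the nanosecond values ultimately returned.)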
+ Resolution() TimestampResolution +} diff --git a/vendor/github.com/google/gopacket/writer.go b/vendor/github.com/google/gopacket/writer.go new file mode 100644 index 0000000000..5d303dc4a7 --- /dev/null +++ b/vendor/github.com/google/gopacket/writer.go @@ -0,0 +1,232 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package gopacket + +import ( + "fmt" +) + +// SerializableLayer allows its implementations to be written out as a set of bytes, +// so those bytes may be sent on the wire or otherwise used by the caller. +// SerializableLayer is implemented by certain Layer types, and can be encoded to +// bytes using the LayerWriter object. +type SerializableLayer interface { + // SerializeTo writes this layer to a slice, growing that slice if necessary + // to make it fit the layer's data. + // Args: + // b: SerializeBuffer to write this layer on to. When called, b.Bytes() + // is the payload this layer should wrap, if any. Note that this + // layer can either prepend itself (common), append itself + // (uncommon), or both (sometimes padding or footers are required at + // the end of packet data). It's also possible (though probably very + // rarely needed) to overwrite any bytes in the current payload. + // After this call, b.Bytes() should return the byte encoding of + // this layer wrapping the original b.Bytes() payload. + // opts: options to use while writing out data. + // Returns: + // error if a problem was encountered during encoding. If an error is + // returned, the bytes in data should be considered invalidated, and + // not used. + // + // SerializeTo calls SHOULD entirely ignore LayerContents and + // LayerPayload. It just serializes based on struct fields, neither + // modifying nor using contents/payload. + SerializeTo(b SerializeBuffer, opts SerializeOptions) error + // LayerType returns the type of the layer that is being serialized to the buffer + LayerType() LayerType +} + +// SerializeOptions provides options for behaviors that SerializableLayers may want to +// implement. +type SerializeOptions struct { + // FixLengths determines whether, during serialization, layers should fix + // the values for any length field that depends on the payload. + FixLengths bool + // ComputeChecksums determines whether, during serialization, layers + // should recompute checksums based on their payloads. + ComputeChecksums bool +} + +// SerializeBuffer is a helper used by gopacket for writing out packet layers. +// SerializeBuffer starts off as an empty []byte. Subsequent calls to PrependBytes +// return byte slices before the current Bytes(), AppendBytes returns byte +// slices after. +// +// Byte slices returned by PrependBytes/AppendBytes are NOT zero'd out, so if +// you want to make sure they're all zeros, set them as such. +// +// SerializeBuffer is specifically designed to handle packet writing, where unlike +// with normal writes it's easier to start writing at the inner-most layer and +// work out, meaning that we often need to prepend bytes. This runs counter to +// typical writes to byte slices using append(), where we only write at the end +// of the buffer. +// +// It can be reused via Clear. Note, however, that a Clear call will invalidate the +// byte slices returned by any previous Bytes() call (the same buffer is +// reused). 
+// +// 1) Reusing a write buffer is generally much faster than creating a new one, +// and with the default implementation it avoids additional memory allocations. +// 2) If a byte slice from a previous Bytes() call will continue to be used, +// it's better to create a new SerializeBuffer. +// +// The Clear method is specifically designed to minimize memory allocations for +// similar later workloads on the SerializeBuffer. IE: if you make a set of +// Prepend/Append calls, then clear, then make the same calls with the same +// sizes, the second round (and all future similar rounds) shouldn't allocate +// any new memory. +type SerializeBuffer interface { + // Bytes returns the contiguous set of bytes collected so far by Prepend/Append + // calls. The slice returned by Bytes will be modified by future Clear calls, + // so if you're planning on clearing this SerializeBuffer, you may want to copy + // Bytes somewhere safe first. + Bytes() []byte + // PrependBytes returns a set of bytes which prepends the current bytes in this + // buffer. These bytes start in an indeterminate state, so they should be + // overwritten by the caller. The caller must only call PrependBytes if they + // know they're going to immediately overwrite all bytes returned. + PrependBytes(num int) ([]byte, error) + // AppendBytes returns a set of bytes which appends the current bytes in this + // buffer. These bytes start in an indeterminate state, so they should be + // overwritten by the caller. The caller must only call AppendBytes if they + // know they're going to immediately overwrite all bytes returned. + AppendBytes(num int) ([]byte, error) + // Clear resets the SerializeBuffer to a new, empty buffer. After a call to clear, + // the byte slice returned by any previous call to Bytes() for this buffer + // should be considered invalidated. + Clear() error + // Layers returns all the Layers that have been successfully serialized into this buffer + // already. + Layers() []LayerType + // PushLayer adds the current Layer to the list of Layers that have been serialized + // into this buffer. + PushLayer(LayerType) +} + +type serializeBuffer struct { + data []byte + start int + prepended, appended int + layers []LayerType +} + +// NewSerializeBuffer creates a new instance of the default implementation of +// the SerializeBuffer interface. +func NewSerializeBuffer() SerializeBuffer { + return &serializeBuffer{} +} + +// NewSerializeBufferExpectedSize creates a new buffer for serialization, optimized for an +// expected number of bytes prepended/appended. This tends to decrease the +// number of memory allocations made by the buffer during writes. 
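// Editor's sketch (an illustrative addition, not part of the upstream source):
// using a SerializeBuffer directly. PrependBytes and AppendBytes hand back
// uninitialized slices that the caller must overwrite immediately, and Clear
// invalidates slices returned by earlier Bytes() calls.
//
//	buf := gopacket.NewSerializeBuffer()
//	payload, _ := buf.AppendBytes(4) // room for a 4-byte payload after the current bytes
//	copy(payload, []byte{0xde, 0xad, 0xbe, 0xef})
//	hdr, _ := buf.PrependBytes(2) // room for a 2-byte header in front
//	hdr[0], hdr[1] = 0x08, 0x00
//	wire := buf.Bytes() // 6 bytes: header followed by payload
//	_ = wire
//	_ = buf.Clear() // reuse the buffer; wire is now invalid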
+func NewSerializeBufferExpectedSize(expectedPrependLength, expectedAppendLength int) SerializeBuffer { + return &serializeBuffer{ + data: make([]byte, expectedPrependLength, expectedPrependLength+expectedAppendLength), + start: expectedPrependLength, + prepended: expectedPrependLength, + appended: expectedAppendLength, + } +} + +func (w *serializeBuffer) Bytes() []byte { + return w.data[w.start:] +} + +func (w *serializeBuffer) PrependBytes(num int) ([]byte, error) { + if num < 0 { + panic("num < 0") + } + if w.start < num { + toPrepend := w.prepended + if toPrepend < num { + toPrepend = num + } + w.prepended += toPrepend + length := cap(w.data) + toPrepend + newData := make([]byte, length) + newStart := w.start + toPrepend + copy(newData[newStart:], w.data[w.start:]) + w.start = newStart + w.data = newData[:toPrepend+len(w.data)] + } + w.start -= num + return w.data[w.start : w.start+num], nil +} + +func (w *serializeBuffer) AppendBytes(num int) ([]byte, error) { + if num < 0 { + panic("num < 0") + } + initialLength := len(w.data) + if cap(w.data)-initialLength < num { + toAppend := w.appended + if toAppend < num { + toAppend = num + } + w.appended += toAppend + newData := make([]byte, cap(w.data)+toAppend) + copy(newData[w.start:], w.data[w.start:]) + w.data = newData[:initialLength] + } + // Grow the buffer. We know it'll be under capacity given above. + w.data = w.data[:initialLength+num] + return w.data[initialLength:], nil +} + +func (w *serializeBuffer) Clear() error { + w.start = w.prepended + w.data = w.data[:w.start] + w.layers = w.layers[:0] + return nil +} + +func (w *serializeBuffer) Layers() []LayerType { + return w.layers +} + +func (w *serializeBuffer) PushLayer(l LayerType) { + w.layers = append(w.layers, l) +} + +// SerializeLayers clears the given write buffer, then writes all layers into it so +// they correctly wrap each other. Note that by clearing the buffer, it +// invalidates all slices previously returned by w.Bytes() +// +// Example: +// buf := gopacket.NewSerializeBuffer() +// opts := gopacket.SerializeOptions{} +// gopacket.SerializeLayers(buf, opts, a, b, c) +// firstPayload := buf.Bytes() // contains byte representation of a(b(c)) +// gopacket.SerializeLayers(buf, opts, d, e, f) +// secondPayload := buf.Bytes() // contains byte representation of d(e(f)). firstPayload is now invalidated, since the SerializeLayers call Clears buf. +func SerializeLayers(w SerializeBuffer, opts SerializeOptions, layers ...SerializableLayer) error { + w.Clear() + for i := len(layers) - 1; i >= 0; i-- { + layer := layers[i] + err := layer.SerializeTo(w, opts) + if err != nil { + return err + } + w.PushLayer(layer.LayerType()) + } + return nil +} + +// SerializePacket is a convenience function that calls SerializeLayers +// on packet's Layers(). +// It returns an error if one of the packet layers is not a SerializableLayer. +func SerializePacket(buf SerializeBuffer, opts SerializeOptions, packet Packet) error { + sls := []SerializableLayer{} + for _, layer := range packet.Layers() { + sl, ok := layer.(SerializableLayer) + if !ok { + return fmt.Errorf("layer %s is not serializable", layer.LayerType().String()) + } + sls = append(sls, sl) + } + return SerializeLayers(buf, opts, sls...) 
+} diff --git a/vendor/github.com/gorilla/mux/.editorconfig b/vendor/github.com/gorilla/mux/.editorconfig new file mode 100644 index 0000000000..c6b74c3e0d --- /dev/null +++ b/vendor/github.com/gorilla/mux/.editorconfig @@ -0,0 +1,20 @@ +; https://editorconfig.org/ + +root = true + +[*] +insert_final_newline = true +charset = utf-8 +trim_trailing_whitespace = true +indent_style = space +indent_size = 2 + +[{Makefile,go.mod,go.sum,*.go,.gitmodules}] +indent_style = tab +indent_size = 4 + +[*.md] +indent_size = 4 +trim_trailing_whitespace = false + +eclint_indent_style = unset \ No newline at end of file diff --git a/vendor/github.com/gorilla/mux/.gitignore b/vendor/github.com/gorilla/mux/.gitignore new file mode 100644 index 0000000000..84039fec68 --- /dev/null +++ b/vendor/github.com/gorilla/mux/.gitignore @@ -0,0 +1 @@ +coverage.coverprofile diff --git a/vendor/github.com/gorilla/mux/AUTHORS b/vendor/github.com/gorilla/mux/AUTHORS deleted file mode 100644 index b722392ee5..0000000000 --- a/vendor/github.com/gorilla/mux/AUTHORS +++ /dev/null @@ -1,8 +0,0 @@ -# This is the official list of gorilla/mux authors for copyright purposes. -# -# Please keep the list sorted. - -Google LLC (https://opensource.google.com/) -Kamil Kisielk -Matt Silverlock -Rodrigo Moraes (https://github.com/moraes) diff --git a/vendor/github.com/gorilla/mux/LICENSE b/vendor/github.com/gorilla/mux/LICENSE index 6903df6386..bb9d80bc9b 100644 --- a/vendor/github.com/gorilla/mux/LICENSE +++ b/vendor/github.com/gorilla/mux/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2012-2018 The Gorilla Authors. All rights reserved. +Copyright (c) 2023 The Gorilla Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/github.com/gorilla/mux/Makefile b/vendor/github.com/gorilla/mux/Makefile new file mode 100644 index 0000000000..98f5ab75f9 --- /dev/null +++ b/vendor/github.com/gorilla/mux/Makefile @@ -0,0 +1,34 @@ +GO_LINT=$(shell which golangci-lint 2> /dev/null || echo '') +GO_LINT_URI=github.com/golangci/golangci-lint/cmd/golangci-lint@latest + +GO_SEC=$(shell which gosec 2> /dev/null || echo '') +GO_SEC_URI=github.com/securego/gosec/v2/cmd/gosec@latest + +GO_VULNCHECK=$(shell which govulncheck 2> /dev/null || echo '') +GO_VULNCHECK_URI=golang.org/x/vuln/cmd/govulncheck@latest + +.PHONY: golangci-lint +golangci-lint: + $(if $(GO_LINT), ,go install $(GO_LINT_URI)) + @echo "##### Running golangci-lint" + golangci-lint run -v + +.PHONY: gosec +gosec: + $(if $(GO_SEC), ,go install $(GO_SEC_URI)) + @echo "##### Running gosec" + gosec ./... + +.PHONY: govulncheck +govulncheck: + $(if $(GO_VULNCHECK), ,go install $(GO_VULNCHECK_URI)) + @echo "##### Running govulncheck" + govulncheck ./... + +.PHONY: verify +verify: golangci-lint gosec govulncheck + +.PHONY: test +test: + @echo "##### Running tests" + go test -race -cover -coverprofile=coverage.coverprofile -covermode=atomic -v ./... 
\ No newline at end of file diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md index 35eea9f106..382513d57c 100644 --- a/vendor/github.com/gorilla/mux/README.md +++ b/vendor/github.com/gorilla/mux/README.md @@ -1,12 +1,12 @@ # gorilla/mux -[![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux) -[![CircleCI](https://circleci.com/gh/gorilla/mux.svg?style=svg)](https://circleci.com/gh/gorilla/mux) -[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge) +![testing](https://github.com/gorilla/mux/actions/workflows/test.yml/badge.svg) +[![codecov](https://codecov.io/github/gorilla/mux/branch/main/graph/badge.svg)](https://codecov.io/github/gorilla/mux) +[![godoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux) +[![sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge) -![Gorilla Logo](https://cloud-cdn.questionable.services/gorilla-icon-64.png) -https://www.gorillatoolkit.org/pkg/mux +![Gorilla Logo](https://github.com/gorilla/.github/assets/53367916/d92caabf-98e0-473e-bfbf-ab554ba435e5) Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to their respective handler. @@ -247,32 +247,25 @@ type spaHandler struct { // file located at the index path on the SPA handler will be served. This // is suitable behavior for serving an SPA (single page application). func (h spaHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - // get the absolute path to prevent directory traversal - path, err := filepath.Abs(r.URL.Path) - if err != nil { - // if we failed to get the absolute path respond with a 400 bad request - // and stop - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - // prepend the path with the path to the static directory - path = filepath.Join(h.staticPath, path) + // Join internally call path.Clean to prevent directory traversal + path := filepath.Join(h.staticPath, r.URL.Path) - // check whether a file exists at the given path - _, err = os.Stat(path) - if os.IsNotExist(err) { - // file does not exist, serve index.html + // check whether a file exists or is a directory at the given path + fi, err := os.Stat(path) + if os.IsNotExist(err) || fi.IsDir() { + // file does not exist or path is a directory, serve index.html http.ServeFile(w, r, filepath.Join(h.staticPath, h.indexPath)) return - } else if err != nil { - // if we got an error (that wasn't that the file doesn't exist) stating the - // file, return a 500 internal server error and stop + } + + if err != nil { + // if we got an error (that wasn't that the file doesn't exist) stating the + // file, return a 500 internal server error and stop http.Error(w, err.Error(), http.StatusInternalServerError) - return + return } - // otherwise, use http.FileServer to serve the static dir + // otherwise, use http.FileServer to serve the static file http.FileServer(http.Dir(h.staticPath)).ServeHTTP(w, r) } @@ -375,6 +368,19 @@ url, err := r.Get("article").URL("subdomain", "news", "id", "42") ``` +To find all the required variables for a given route when calling `URL()`, the method `GetVarNames()` is available: +```go +r := mux.NewRouter() +r.Host("{domain}"). + Path("/{group}/{item_id}"). + Queries("some_data1", "{some_data1}"). + Queries("some_data2", "{some_data2}"). 
+ Name("article") + +// Will print [domain group item_id some_data1 some_data2] +fmt.Println(r.Get("article").GetVarNames()) + +``` ### Walking Routes The `Walk` function on `mux.Router` can be used to visit all of the routes that are registered on a router. For example, @@ -572,7 +578,7 @@ func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler r := mux.NewRouter() r.HandleFunc("/", handler) -amw := authenticationMiddleware{} +amw := authenticationMiddleware{tokenUsers: make(map[string]string)} amw.Populate() r.Use(amw.Middleware) @@ -758,7 +764,8 @@ func TestMetricsHandler(t *testing.T) { rr := httptest.NewRecorder() - // Need to create a router that we can pass the request through so that the vars will be added to the context + // To add the vars to the context, + // we need to create a router through which we can pass the request. router := mux.NewRouter() router.HandleFunc("/metrics/{type}", MetricsHandler) router.ServeHTTP(rr, req) diff --git a/vendor/github.com/gorilla/mux/doc.go b/vendor/github.com/gorilla/mux/doc.go index bd5a38b55d..80601351fd 100644 --- a/vendor/github.com/gorilla/mux/doc.go +++ b/vendor/github.com/gorilla/mux/doc.go @@ -10,18 +10,18 @@ http.ServeMux, mux.Router matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are: - * Requests can be matched based on URL host, path, path prefix, schemes, - header and query values, HTTP methods or using custom matchers. - * URL hosts, paths and query values can have variables with an optional - regular expression. - * Registered URLs can be built, or "reversed", which helps maintaining - references to resources. - * Routes can be used as subrouters: nested routes are only tested if the - parent route matches. This is useful to define groups of routes that - share common conditions like a host, a path prefix or other repeated - attributes. As a bonus, this optimizes request matching. - * It implements the http.Handler interface so it is compatible with the - standard http.ServeMux. + - Requests can be matched based on URL host, path, path prefix, schemes, + header and query values, HTTP methods or using custom matchers. + - URL hosts, paths and query values can have variables with an optional + regular expression. + - Registered URLs can be built, or "reversed", which helps maintaining + references to resources. + - Routes can be used as subrouters: nested routes are only tested if the + parent route matches. This is useful to define groups of routes that + share common conditions like a host, a path prefix or other repeated + attributes. As a bonus, this optimizes request matching. + - It implements the http.Handler interface so it is compatible with the + standard http.ServeMux. Let's start registering a couple of URL paths and handlers: @@ -301,6 +301,5 @@ A more complex authentication middleware, which maps session token to users, cou r.Use(amw.Middleware) Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. 
- */ package mux diff --git a/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/gorilla/mux/mux.go index 782a34b22a..1e089906fa 100644 --- a/vendor/github.com/gorilla/mux/mux.go +++ b/vendor/github.com/gorilla/mux/mux.go @@ -31,24 +31,26 @@ func NewRouter() *Router { // It implements the http.Handler interface, so it can be registered to serve // requests: // -// var router = mux.NewRouter() +// var router = mux.NewRouter() // -// func main() { -// http.Handle("/", router) -// } +// func main() { +// http.Handle("/", router) +// } // // Or, for Google App Engine, register it in a init() function: // -// func init() { -// http.Handle("/", router) -// } +// func init() { +// http.Handle("/", router) +// } // // This will send all incoming requests to the router. type Router struct { // Configurable Handler to be used when no route matches. + // This can be used to render your own 404 Not Found errors. NotFoundHandler http.Handler // Configurable Handler to be used when the request method does not match the route. + // This can be used to render your own 405 Method Not Allowed errors. MethodNotAllowedHandler http.Handler // Routes to be matched, in order. diff --git a/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/gorilla/mux/regexp.go index 0144842bb2..5d05cfa0e9 100644 --- a/vendor/github.com/gorilla/mux/regexp.go +++ b/vendor/github.com/gorilla/mux/regexp.go @@ -22,10 +22,10 @@ type routeRegexpOptions struct { type regexpType int const ( - regexpTypePath regexpType = 0 - regexpTypeHost regexpType = 1 - regexpTypePrefix regexpType = 2 - regexpTypeQuery regexpType = 3 + regexpTypePath regexpType = iota + regexpTypeHost + regexpTypePrefix + regexpTypeQuery ) // newRouteRegexp parses a route template and returns a routeRegexp, @@ -195,7 +195,7 @@ func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { // url builds a URL part using the given values. func (r *routeRegexp) url(values map[string]string) (string, error) { - urlValues := make([]interface{}, len(r.varsN), len(r.varsN)) + urlValues := make([]interface{}, len(r.varsN)) for k, v := range r.varsN { value, ok := values[v] if !ok { diff --git a/vendor/github.com/gorilla/mux/route.go b/vendor/github.com/gorilla/mux/route.go index 750afe570d..e8f11df221 100644 --- a/vendor/github.com/gorilla/mux/route.go +++ b/vendor/github.com/gorilla/mux/route.go @@ -64,8 +64,18 @@ func (r *Route) Match(req *http.Request, match *RouteMatch) bool { match.MatchErr = nil } - matchErr = nil + matchErr = nil // nolint:ineffassign return false + } else { + // Multiple routes may share the same path but use different HTTP methods. For instance: + // Route 1: POST "/users/{id}". + // Route 2: GET "/users/{id}", parameters: "id": "[0-9]+". + // + // The router must handle these cases correctly. For a GET request to "/users/abc" with "id" as "-2", + // The router should return a "Not Found" error as no route fully matches this request. + if match.MatchErr == ErrMethodMismatch { + match.MatchErr = nil + } } } @@ -230,9 +240,9 @@ func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool { // Headers adds a matcher for request header values. // It accepts a sequence of key/value pairs to be matched. 
For example: // -// r := mux.NewRouter() -// r.Headers("Content-Type", "application/json", -// "X-Requested-With", "XMLHttpRequest") +// r := mux.NewRouter().NewRoute() +// r.Headers("Content-Type", "application/json", +// "X-Requested-With", "XMLHttpRequest") // // The above route will only match if both request header values match. // If the value is an empty string, it will match any value if the key is set. @@ -255,9 +265,9 @@ func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool { // HeadersRegexp accepts a sequence of key/value pairs, where the value has regex // support. For example: // -// r := mux.NewRouter() -// r.HeadersRegexp("Content-Type", "application/(text|json)", -// "X-Requested-With", "XMLHttpRequest") +// r := mux.NewRouter().NewRoute() +// r.HeadersRegexp("Content-Type", "application/(text|json)", +// "X-Requested-With", "XMLHttpRequest") // // The above route will only match if both the request header matches both regular expressions. // If the value is an empty string, it will match any value if the key is set. @@ -283,10 +293,10 @@ func (r *Route) HeadersRegexp(pairs ...string) *Route { // // For example: // -// r := mux.NewRouter() -// r.Host("www.example.com") -// r.Host("{subdomain}.domain.com") -// r.Host("{subdomain:[a-z]+}.domain.com") +// r := mux.NewRouter().NewRoute() +// r.Host("www.example.com") +// r.Host("{subdomain}.domain.com") +// r.Host("{subdomain:[a-z]+}.domain.com") // // Variable names must be unique in a given route. They can be retrieved // calling mux.Vars(request). @@ -342,11 +352,11 @@ func (r *Route) Methods(methods ...string) *Route { // // For example: // -// r := mux.NewRouter() -// r.Path("/products/").Handler(ProductsHandler) -// r.Path("/products/{key}").Handler(ProductsHandler) -// r.Path("/articles/{category}/{id:[0-9]+}"). -// Handler(ArticleHandler) +// r := mux.NewRouter().NewRoute() +// r.Path("/products/").Handler(ProductsHandler) +// r.Path("/products/{key}").Handler(ProductsHandler) +// r.Path("/articles/{category}/{id:[0-9]+}"). +// Handler(ArticleHandler) // // Variable names must be unique in a given route. They can be retrieved // calling mux.Vars(request). @@ -377,8 +387,8 @@ func (r *Route) PathPrefix(tpl string) *Route { // It accepts a sequence of key/value pairs. Values may define variables. // For example: // -// r := mux.NewRouter() -// r.Queries("foo", "bar", "id", "{id:[0-9]+}") +// r := mux.NewRouter().NewRoute() +// r.Queries("foo", "bar", "id", "{id:[0-9]+}") // // The above route will only match if the URL contains the defined queries // values, e.g.: ?foo=bar&id=42. @@ -473,11 +483,11 @@ func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route { // // It will test the inner routes only if the parent route matched. For example: // -// r := mux.NewRouter() -// s := r.Host("www.example.com").Subrouter() -// s.HandleFunc("/products/", ProductsHandler) -// s.HandleFunc("/products/{key}", ProductHandler) -// s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) +// r := mux.NewRouter().NewRoute() +// s := r.Host("www.example.com").Subrouter() +// s.HandleFunc("/products/", ProductsHandler) +// s.HandleFunc("/products/{key}", ProductHandler) +// s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) // // Here, the routes registered in the subrouter won't be tested if the host // doesn't match. @@ -497,36 +507,36 @@ func (r *Route) Subrouter() *Router { // It accepts a sequence of key/value pairs for the route variables. 
For // example, given this route: // -// r := mux.NewRouter() -// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). -// Name("article") +// r := mux.NewRouter() +// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). +// Name("article") // // ...a URL for it can be built using: // -// url, err := r.Get("article").URL("category", "technology", "id", "42") +// url, err := r.Get("article").URL("category", "technology", "id", "42") // // ...which will return an url.URL with the following path: // -// "/articles/technology/42" +// "/articles/technology/42" // // This also works for host variables: // -// r := mux.NewRouter() -// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). -// Host("{subdomain}.domain.com"). -// Name("article") +// r := mux.NewRouter() +// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). +// Host("{subdomain}.domain.com"). +// Name("article") // -// // url.String() will be "http://news.domain.com/articles/technology/42" -// url, err := r.Get("article").URL("subdomain", "news", -// "category", "technology", -// "id", "42") +// // url.String() will be "http://news.domain.com/articles/technology/42" +// url, err := r.Get("article").URL("subdomain", "news", +// "category", "technology", +// "id", "42") // // The scheme of the resulting url will be the first argument that was passed to Schemes: // -// // url.String() will be "https://example.com" -// r := mux.NewRouter() -// url, err := r.Host("example.com") -// .Schemes("https", "http").URL() +// // url.String() will be "https://example.com" +// r := mux.NewRouter().NewRoute() +// url, err := r.Host("example.com") +// .Schemes("https", "http").URL() // // All variables defined in the route are required, and their values must // conform to the corresponding patterns. @@ -718,6 +728,25 @@ func (r *Route) GetHostTemplate() (string, error) { return r.regexp.host.template, nil } +// GetVarNames returns the names of all variables added by regexp matchers +// These can be used to know which route variables should be passed into r.URL() +func (r *Route) GetVarNames() ([]string, error) { + if r.err != nil { + return nil, r.err + } + var varNames []string + if r.regexp.host != nil { + varNames = append(varNames, r.regexp.host.varsN...) + } + if r.regexp.path != nil { + varNames = append(varNames, r.regexp.path.varsN...) + } + for _, regx := range r.regexp.queries { + varNames = append(varNames, regx.varsN...) + } + return varNames, nil +} + // prepareVars converts the route variable pairs into a map. If the route has a // BuildVarsFunc, it is invoked. func (r *Route) prepareVars(pairs ...string) (map[string]string, error) { diff --git a/vendor/github.com/gorilla/websocket/.gitignore b/vendor/github.com/gorilla/websocket/.gitignore new file mode 100644 index 0000000000..cd3fcd1ef7 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/.gitignore @@ -0,0 +1,25 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +.idea/ +*.iml diff --git a/vendor/github.com/gorilla/websocket/AUTHORS b/vendor/github.com/gorilla/websocket/AUTHORS new file mode 100644 index 0000000000..1931f40068 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/AUTHORS @@ -0,0 +1,9 @@ +# This is the official list of Gorilla WebSocket authors for copyright +# purposes. 
+# +# Please keep the list sorted. + +Gary Burd +Google LLC (https://opensource.google.com/) +Joachim Bauch + diff --git a/vendor/github.com/gorilla/websocket/LICENSE b/vendor/github.com/gorilla/websocket/LICENSE new file mode 100644 index 0000000000..9171c97225 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/websocket/README.md b/vendor/github.com/gorilla/websocket/README.md new file mode 100644 index 0000000000..2517a28715 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/README.md @@ -0,0 +1,39 @@ +# Gorilla WebSocket + +[![GoDoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket) +[![CircleCI](https://circleci.com/gh/gorilla/websocket.svg?style=svg)](https://circleci.com/gh/gorilla/websocket) + +Gorilla WebSocket is a [Go](http://golang.org/) implementation of the +[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. + + +--- + +⚠️ **[The Gorilla WebSocket Package is looking for a new maintainer](https://github.com/gorilla/websocket/issues/370)** + +--- + +### Documentation + +* [API Reference](https://pkg.go.dev/github.com/gorilla/websocket?tab=doc) +* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat) +* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command) +* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo) +* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch) + +### Status + +The Gorilla WebSocket package provides a complete and tested implementation of +the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The +package API is stable. + +### Installation + + go get github.com/gorilla/websocket + +### Protocol Compliance + +The Gorilla WebSocket package passes the server tests in the [Autobahn Test +Suite](https://github.com/crossbario/autobahn-testsuite) using the application in the [examples/autobahn +subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn). 
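A minimal client sketch (an editorial illustration, not part of the upstream README): it dials an endpoint, writes one text message, and reads the reply. The `ws://localhost:8080/echo` URL is a placeholder; substitute a real WebSocket server.

```go
package main

import (
	"log"

	"github.com/gorilla/websocket"
)

func main() {
	// Placeholder endpoint; any server that echoes text frames will do.
	c, _, err := websocket.DefaultDialer.Dial("ws://localhost:8080/echo", nil)
	if err != nil {
		log.Fatal("dial:", err)
	}
	defer c.Close()

	if err := c.WriteMessage(websocket.TextMessage, []byte("hello")); err != nil {
		log.Fatal("write:", err)
	}
	_, msg, err := c.ReadMessage()
	if err != nil {
		log.Fatal("read:", err)
	}
	log.Printf("recv: %s", msg)
}
```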
+ diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go new file mode 100644 index 0000000000..2efd83555d --- /dev/null +++ b/vendor/github.com/gorilla/websocket/client.go @@ -0,0 +1,422 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bytes" + "context" + "crypto/tls" + "errors" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/httptrace" + "net/url" + "strings" + "time" +) + +// ErrBadHandshake is returned when the server response to opening handshake is +// invalid. +var ErrBadHandshake = errors.New("websocket: bad handshake") + +var errInvalidCompression = errors.New("websocket: invalid compression negotiation") + +// NewClient creates a new client connection using the given net connection. +// The URL u specifies the host and request URI. Use requestHeader to specify +// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies +// (Cookie). Use the response.Header to get the selected subprotocol +// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). +// +// If the WebSocket handshake fails, ErrBadHandshake is returned along with a +// non-nil *http.Response so that callers can handle redirects, authentication, +// etc. +// +// Deprecated: Use Dialer instead. +func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) { + d := Dialer{ + ReadBufferSize: readBufSize, + WriteBufferSize: writeBufSize, + NetDial: func(net, addr string) (net.Conn, error) { + return netConn, nil + }, + } + return d.Dial(u.String(), requestHeader) +} + +// A Dialer contains options for connecting to WebSocket server. +// +// It is safe to call Dialer's methods concurrently. +type Dialer struct { + // NetDial specifies the dial function for creating TCP connections. If + // NetDial is nil, net.Dial is used. + NetDial func(network, addr string) (net.Conn, error) + + // NetDialContext specifies the dial function for creating TCP connections. If + // NetDialContext is nil, NetDial is used. + NetDialContext func(ctx context.Context, network, addr string) (net.Conn, error) + + // NetDialTLSContext specifies the dial function for creating TLS/TCP connections. If + // NetDialTLSContext is nil, NetDialContext is used. + // If NetDialTLSContext is set, Dial assumes the TLS handshake is done there and + // TLSClientConfig is ignored. + NetDialTLSContext func(ctx context.Context, network, addr string) (net.Conn, error) + + // Proxy specifies a function to return a proxy for a given + // Request. If the function returns a non-nil error, the + // request is aborted with the provided error. + // If Proxy is nil or returns a nil *URL, no proxy is used. + Proxy func(*http.Request) (*url.URL, error) + + // TLSClientConfig specifies the TLS configuration to use with tls.Client. + // If nil, the default configuration is used. + // If either NetDialTLS or NetDialTLSContext are set, Dial assumes the TLS handshake + // is done there and TLSClientConfig is ignored. + TLSClientConfig *tls.Config + + // HandshakeTimeout specifies the duration for the handshake to complete. + HandshakeTimeout time.Duration + + // ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer + // size is zero, then a useful default size is used. 
The I/O buffer sizes + // do not limit the size of the messages that can be sent or received. + ReadBufferSize, WriteBufferSize int + + // WriteBufferPool is a pool of buffers for write operations. If the value + // is not set, then write buffers are allocated to the connection for the + // lifetime of the connection. + // + // A pool is most useful when the application has a modest volume of writes + // across a large number of connections. + // + // Applications should use a single pool for each unique value of + // WriteBufferSize. + WriteBufferPool BufferPool + + // Subprotocols specifies the client's requested subprotocols. + Subprotocols []string + + // EnableCompression specifies if the client should attempt to negotiate + // per message compression (RFC 7692). Setting this value to true does not + // guarantee that compression will be supported. Currently only "no context + // takeover" modes are supported. + EnableCompression bool + + // Jar specifies the cookie jar. + // If Jar is nil, cookies are not sent in requests and ignored + // in responses. + Jar http.CookieJar +} + +// Dial creates a new client connection by calling DialContext with a background context. +func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { + return d.DialContext(context.Background(), urlStr, requestHeader) +} + +var errMalformedURL = errors.New("malformed ws or wss URL") + +func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) { + hostPort = u.Host + hostNoPort = u.Host + if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") { + hostNoPort = hostNoPort[:i] + } else { + switch u.Scheme { + case "wss": + hostPort += ":443" + case "https": + hostPort += ":443" + default: + hostPort += ":80" + } + } + return hostPort, hostNoPort +} + +// DefaultDialer is a dialer with all fields set to the default values. +var DefaultDialer = &Dialer{ + Proxy: http.ProxyFromEnvironment, + HandshakeTimeout: 45 * time.Second, +} + +// nilDialer is dialer to use when receiver is nil. +var nilDialer = *DefaultDialer + +// DialContext creates a new client connection. Use requestHeader to specify the +// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie). +// Use the response.Header to get the selected subprotocol +// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). +// +// The context will be used in the request and in the Dialer. +// +// If the WebSocket handshake fails, ErrBadHandshake is returned along with a +// non-nil *http.Response so that callers can handle redirects, authentication, +// etcetera. The response body may not contain the entire response and does not +// need to be closed by the application. +func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { + if d == nil { + d = &nilDialer + } + + challengeKey, err := generateChallengeKey() + if err != nil { + return nil, nil, err + } + + u, err := url.Parse(urlStr) + if err != nil { + return nil, nil, err + } + + switch u.Scheme { + case "ws": + u.Scheme = "http" + case "wss": + u.Scheme = "https" + default: + return nil, nil, errMalformedURL + } + + if u.User != nil { + // User name and password are not allowed in websocket URIs. 
+ return nil, nil, errMalformedURL + } + + req := &http.Request{ + Method: http.MethodGet, + URL: u, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: make(http.Header), + Host: u.Host, + } + req = req.WithContext(ctx) + + // Set the cookies present in the cookie jar of the dialer + if d.Jar != nil { + for _, cookie := range d.Jar.Cookies(u) { + req.AddCookie(cookie) + } + } + + // Set the request headers using the capitalization for names and values in + // RFC examples. Although the capitalization shouldn't matter, there are + // servers that depend on it. The Header.Set method is not used because the + // method canonicalizes the header names. + req.Header["Upgrade"] = []string{"websocket"} + req.Header["Connection"] = []string{"Upgrade"} + req.Header["Sec-WebSocket-Key"] = []string{challengeKey} + req.Header["Sec-WebSocket-Version"] = []string{"13"} + if len(d.Subprotocols) > 0 { + req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")} + } + for k, vs := range requestHeader { + switch { + case k == "Host": + if len(vs) > 0 { + req.Host = vs[0] + } + case k == "Upgrade" || + k == "Connection" || + k == "Sec-Websocket-Key" || + k == "Sec-Websocket-Version" || + k == "Sec-Websocket-Extensions" || + (k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0): + return nil, nil, errors.New("websocket: duplicate header not allowed: " + k) + case k == "Sec-Websocket-Protocol": + req.Header["Sec-WebSocket-Protocol"] = vs + default: + req.Header[k] = vs + } + } + + if d.EnableCompression { + req.Header["Sec-WebSocket-Extensions"] = []string{"permessage-deflate; server_no_context_takeover; client_no_context_takeover"} + } + + if d.HandshakeTimeout != 0 { + var cancel func() + ctx, cancel = context.WithTimeout(ctx, d.HandshakeTimeout) + defer cancel() + } + + // Get network dial function. + var netDial func(network, add string) (net.Conn, error) + + switch u.Scheme { + case "http": + if d.NetDialContext != nil { + netDial = func(network, addr string) (net.Conn, error) { + return d.NetDialContext(ctx, network, addr) + } + } else if d.NetDial != nil { + netDial = d.NetDial + } + case "https": + if d.NetDialTLSContext != nil { + netDial = func(network, addr string) (net.Conn, error) { + return d.NetDialTLSContext(ctx, network, addr) + } + } else if d.NetDialContext != nil { + netDial = func(network, addr string) (net.Conn, error) { + return d.NetDialContext(ctx, network, addr) + } + } else if d.NetDial != nil { + netDial = d.NetDial + } + default: + return nil, nil, errMalformedURL + } + + if netDial == nil { + netDialer := &net.Dialer{} + netDial = func(network, addr string) (net.Conn, error) { + return netDialer.DialContext(ctx, network, addr) + } + } + + // If needed, wrap the dial function to set the connection deadline. + if deadline, ok := ctx.Deadline(); ok { + forwardDial := netDial + netDial = func(network, addr string) (net.Conn, error) { + c, err := forwardDial(network, addr) + if err != nil { + return nil, err + } + err = c.SetDeadline(deadline) + if err != nil { + c.Close() + return nil, err + } + return c, nil + } + } + + // If needed, wrap the dial function to connect through a proxy. 
+ if d.Proxy != nil { + proxyURL, err := d.Proxy(req) + if err != nil { + return nil, nil, err + } + if proxyURL != nil { + dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial)) + if err != nil { + return nil, nil, err + } + netDial = dialer.Dial + } + } + + hostPort, hostNoPort := hostPortNoPort(u) + trace := httptrace.ContextClientTrace(ctx) + if trace != nil && trace.GetConn != nil { + trace.GetConn(hostPort) + } + + netConn, err := netDial("tcp", hostPort) + if trace != nil && trace.GotConn != nil { + trace.GotConn(httptrace.GotConnInfo{ + Conn: netConn, + }) + } + if err != nil { + return nil, nil, err + } + + defer func() { + if netConn != nil { + netConn.Close() + } + }() + + if u.Scheme == "https" && d.NetDialTLSContext == nil { + // If NetDialTLSContext is set, assume that the TLS handshake has already been done + + cfg := cloneTLSConfig(d.TLSClientConfig) + if cfg.ServerName == "" { + cfg.ServerName = hostNoPort + } + tlsConn := tls.Client(netConn, cfg) + netConn = tlsConn + + if trace != nil && trace.TLSHandshakeStart != nil { + trace.TLSHandshakeStart() + } + err := doHandshake(ctx, tlsConn, cfg) + if trace != nil && trace.TLSHandshakeDone != nil { + trace.TLSHandshakeDone(tlsConn.ConnectionState(), err) + } + + if err != nil { + return nil, nil, err + } + } + + conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize, d.WriteBufferPool, nil, nil) + + if err := req.Write(netConn); err != nil { + return nil, nil, err + } + + if trace != nil && trace.GotFirstResponseByte != nil { + if peek, err := conn.br.Peek(1); err == nil && len(peek) == 1 { + trace.GotFirstResponseByte() + } + } + + resp, err := http.ReadResponse(conn.br, req) + if err != nil { + return nil, nil, err + } + + if d.Jar != nil { + if rc := resp.Cookies(); len(rc) > 0 { + d.Jar.SetCookies(u, rc) + } + } + + if resp.StatusCode != 101 || + !tokenListContainsValue(resp.Header, "Upgrade", "websocket") || + !tokenListContainsValue(resp.Header, "Connection", "upgrade") || + resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) { + // Before closing the network connection on return from this + // function, slurp up some of the response to aid application + // debugging. + buf := make([]byte, 1024) + n, _ := io.ReadFull(resp.Body, buf) + resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n])) + return nil, resp, ErrBadHandshake + } + + for _, ext := range parseExtensions(resp.Header) { + if ext[""] != "permessage-deflate" { + continue + } + _, snct := ext["server_no_context_takeover"] + _, cnct := ext["client_no_context_takeover"] + if !snct || !cnct { + return nil, resp, errInvalidCompression + } + conn.newCompressionWriter = compressNoContextTakeover + conn.newDecompressionReader = decompressNoContextTakeover + break + } + + resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{})) + conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol") + + netConn.SetDeadline(time.Time{}) + netConn = nil // to avoid close in defer. + return conn, resp, nil +} + +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + return cfg.Clone() +} diff --git a/vendor/github.com/gorilla/websocket/compression.go b/vendor/github.com/gorilla/websocket/compression.go new file mode 100644 index 0000000000..813ffb1e84 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/compression.go @@ -0,0 +1,148 @@ +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "compress/flate" + "errors" + "io" + "strings" + "sync" +) + +const ( + minCompressionLevel = -2 // flate.HuffmanOnly not defined in Go < 1.6 + maxCompressionLevel = flate.BestCompression + defaultCompressionLevel = 1 +) + +var ( + flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool + flateReaderPool = sync.Pool{New: func() interface{} { + return flate.NewReader(nil) + }} +) + +func decompressNoContextTakeover(r io.Reader) io.ReadCloser { + const tail = + // Add four bytes as specified in RFC + "\x00\x00\xff\xff" + + // Add final block to squelch unexpected EOF error from flate reader. + "\x01\x00\x00\xff\xff" + + fr, _ := flateReaderPool.Get().(io.ReadCloser) + fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil) + return &flateReadWrapper{fr} +} + +func isValidCompressionLevel(level int) bool { + return minCompressionLevel <= level && level <= maxCompressionLevel +} + +func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser { + p := &flateWriterPools[level-minCompressionLevel] + tw := &truncWriter{w: w} + fw, _ := p.Get().(*flate.Writer) + if fw == nil { + fw, _ = flate.NewWriter(tw, level) + } else { + fw.Reset(tw) + } + return &flateWriteWrapper{fw: fw, tw: tw, p: p} +} + +// truncWriter is an io.Writer that writes all but the last four bytes of the +// stream to another io.Writer. +type truncWriter struct { + w io.WriteCloser + n int + p [4]byte +} + +func (w *truncWriter) Write(p []byte) (int, error) { + n := 0 + + // fill buffer first for simplicity. + if w.n < len(w.p) { + n = copy(w.p[w.n:], p) + p = p[n:] + w.n += n + if len(p) == 0 { + return n, nil + } + } + + m := len(p) + if m > len(w.p) { + m = len(w.p) + } + + if nn, err := w.w.Write(w.p[:m]); err != nil { + return n + nn, err + } + + copy(w.p[:], w.p[m:]) + copy(w.p[len(w.p)-m:], p[len(p)-m:]) + nn, err := w.w.Write(p[:len(p)-m]) + return n + nn, err +} + +type flateWriteWrapper struct { + fw *flate.Writer + tw *truncWriter + p *sync.Pool +} + +func (w *flateWriteWrapper) Write(p []byte) (int, error) { + if w.fw == nil { + return 0, errWriteClosed + } + return w.fw.Write(p) +} + +func (w *flateWriteWrapper) Close() error { + if w.fw == nil { + return errWriteClosed + } + err1 := w.fw.Flush() + w.p.Put(w.fw) + w.fw = nil + if w.tw.p != [4]byte{0, 0, 0xff, 0xff} { + return errors.New("websocket: internal error, unexpected bytes at end of flate stream") + } + err2 := w.tw.w.Close() + if err1 != nil { + return err1 + } + return err2 +} + +type flateReadWrapper struct { + fr io.ReadCloser +} + +func (r *flateReadWrapper) Read(p []byte) (int, error) { + if r.fr == nil { + return 0, io.ErrClosedPipe + } + n, err := r.fr.Read(p) + if err == io.EOF { + // Preemptively place the reader back in the pool. This helps with + // scenarios where the application does not call NextReader() soon after + // this final read. + r.Close() + } + return n, err +} + +func (r *flateReadWrapper) Close() error { + if r.fr == nil { + return io.ErrClosedPipe + } + err := r.fr.Close() + flateReaderPool.Put(r.fr) + r.fr = nil + return err +} diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go new file mode 100644 index 0000000000..331eebc850 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/conn.go @@ -0,0 +1,1230 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "encoding/binary" + "errors" + "io" + "io/ioutil" + "math/rand" + "net" + "strconv" + "strings" + "sync" + "time" + "unicode/utf8" +) + +const ( + // Frame header byte 0 bits from Section 5.2 of RFC 6455 + finalBit = 1 << 7 + rsv1Bit = 1 << 6 + rsv2Bit = 1 << 5 + rsv3Bit = 1 << 4 + + // Frame header byte 1 bits from Section 5.2 of RFC 6455 + maskBit = 1 << 7 + + maxFrameHeaderSize = 2 + 8 + 4 // Fixed header + length + mask + maxControlFramePayloadSize = 125 + + writeWait = time.Second + + defaultReadBufferSize = 4096 + defaultWriteBufferSize = 4096 + + continuationFrame = 0 + noFrame = -1 +) + +// Close codes defined in RFC 6455, section 11.7. +const ( + CloseNormalClosure = 1000 + CloseGoingAway = 1001 + CloseProtocolError = 1002 + CloseUnsupportedData = 1003 + CloseNoStatusReceived = 1005 + CloseAbnormalClosure = 1006 + CloseInvalidFramePayloadData = 1007 + ClosePolicyViolation = 1008 + CloseMessageTooBig = 1009 + CloseMandatoryExtension = 1010 + CloseInternalServerErr = 1011 + CloseServiceRestart = 1012 + CloseTryAgainLater = 1013 + CloseTLSHandshake = 1015 +) + +// The message types are defined in RFC 6455, section 11.8. +const ( + // TextMessage denotes a text data message. The text message payload is + // interpreted as UTF-8 encoded text data. + TextMessage = 1 + + // BinaryMessage denotes a binary data message. + BinaryMessage = 2 + + // CloseMessage denotes a close control message. The optional message + // payload contains a numeric code and text. Use the FormatCloseMessage + // function to format a close message payload. + CloseMessage = 8 + + // PingMessage denotes a ping control message. The optional message payload + // is UTF-8 encoded text. + PingMessage = 9 + + // PongMessage denotes a pong control message. The optional message payload + // is UTF-8 encoded text. + PongMessage = 10 +) + +// ErrCloseSent is returned when the application writes a message to the +// connection after sending a close message. +var ErrCloseSent = errors.New("websocket: close sent") + +// ErrReadLimit is returned when reading a message that is larger than the +// read limit set for the connection. +var ErrReadLimit = errors.New("websocket: read limit exceeded") + +// netError satisfies the net Error interface. +type netError struct { + msg string + temporary bool + timeout bool +} + +func (e *netError) Error() string { return e.msg } +func (e *netError) Temporary() bool { return e.temporary } +func (e *netError) Timeout() bool { return e.timeout } + +// CloseError represents a close message. +type CloseError struct { + // Code is defined in RFC 6455, section 11.7. + Code int + + // Text is the optional text payload. + Text string +} + +func (e *CloseError) Error() string { + s := []byte("websocket: close ") + s = strconv.AppendInt(s, int64(e.Code), 10) + switch e.Code { + case CloseNormalClosure: + s = append(s, " (normal)"...) + case CloseGoingAway: + s = append(s, " (going away)"...) + case CloseProtocolError: + s = append(s, " (protocol error)"...) + case CloseUnsupportedData: + s = append(s, " (unsupported data)"...) + case CloseNoStatusReceived: + s = append(s, " (no status)"...) + case CloseAbnormalClosure: + s = append(s, " (abnormal closure)"...) + case CloseInvalidFramePayloadData: + s = append(s, " (invalid payload data)"...) + case ClosePolicyViolation: + s = append(s, " (policy violation)"...) 
+ case CloseMessageTooBig: + s = append(s, " (message too big)"...) + case CloseMandatoryExtension: + s = append(s, " (mandatory extension missing)"...) + case CloseInternalServerErr: + s = append(s, " (internal server error)"...) + case CloseTLSHandshake: + s = append(s, " (TLS handshake error)"...) + } + if e.Text != "" { + s = append(s, ": "...) + s = append(s, e.Text...) + } + return string(s) +} + +// IsCloseError returns boolean indicating whether the error is a *CloseError +// with one of the specified codes. +func IsCloseError(err error, codes ...int) bool { + if e, ok := err.(*CloseError); ok { + for _, code := range codes { + if e.Code == code { + return true + } + } + } + return false +} + +// IsUnexpectedCloseError returns boolean indicating whether the error is a +// *CloseError with a code not in the list of expected codes. +func IsUnexpectedCloseError(err error, expectedCodes ...int) bool { + if e, ok := err.(*CloseError); ok { + for _, code := range expectedCodes { + if e.Code == code { + return false + } + } + return true + } + return false +} + +var ( + errWriteTimeout = &netError{msg: "websocket: write timeout", timeout: true, temporary: true} + errUnexpectedEOF = &CloseError{Code: CloseAbnormalClosure, Text: io.ErrUnexpectedEOF.Error()} + errBadWriteOpCode = errors.New("websocket: bad write message type") + errWriteClosed = errors.New("websocket: write closed") + errInvalidControlFrame = errors.New("websocket: invalid control frame") +) + +func newMaskKey() [4]byte { + n := rand.Uint32() + return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)} +} + +func hideTempErr(err error) error { + if e, ok := err.(net.Error); ok && e.Temporary() { + err = &netError{msg: e.Error(), timeout: e.Timeout()} + } + return err +} + +func isControl(frameType int) bool { + return frameType == CloseMessage || frameType == PingMessage || frameType == PongMessage +} + +func isData(frameType int) bool { + return frameType == TextMessage || frameType == BinaryMessage +} + +var validReceivedCloseCodes = map[int]bool{ + // see http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number + + CloseNormalClosure: true, + CloseGoingAway: true, + CloseProtocolError: true, + CloseUnsupportedData: true, + CloseNoStatusReceived: false, + CloseAbnormalClosure: false, + CloseInvalidFramePayloadData: true, + ClosePolicyViolation: true, + CloseMessageTooBig: true, + CloseMandatoryExtension: true, + CloseInternalServerErr: true, + CloseServiceRestart: true, + CloseTryAgainLater: true, + CloseTLSHandshake: false, +} + +func isValidReceivedCloseCode(code int) bool { + return validReceivedCloseCodes[code] || (code >= 3000 && code <= 4999) +} + +// BufferPool represents a pool of buffers. The *sync.Pool type satisfies this +// interface. The type of the value stored in a pool is not specified. +type BufferPool interface { + // Get gets a value from the pool or returns nil if the pool is empty. + Get() interface{} + // Put adds a value to the pool. + Put(interface{}) +} + +// writePoolData is the type added to the write buffer pool. This wrapper is +// used to prevent applications from peeking at and depending on the values +// added to the pool. +type writePoolData struct{ buf []byte } + +// The Conn type represents a WebSocket connection. +type Conn struct { + conn net.Conn + isServer bool + subprotocol string + + // Write fields + mu chan struct{} // used as mutex to protect write to conn + writeBuf []byte // frame is constructed in this buffer. 
+ writePool BufferPool + writeBufSize int + writeDeadline time.Time + writer io.WriteCloser // the current writer returned to the application + isWriting bool // for best-effort concurrent write detection + + writeErrMu sync.Mutex + writeErr error + + enableWriteCompression bool + compressionLevel int + newCompressionWriter func(io.WriteCloser, int) io.WriteCloser + + // Read fields + reader io.ReadCloser // the current reader returned to the application + readErr error + br *bufio.Reader + // bytes remaining in current frame. + // set setReadRemaining to safely update this value and prevent overflow + readRemaining int64 + readFinal bool // true the current message has more frames. + readLength int64 // Message size. + readLimit int64 // Maximum message size. + readMaskPos int + readMaskKey [4]byte + handlePong func(string) error + handlePing func(string) error + handleClose func(int, string) error + readErrCount int + messageReader *messageReader // the current low-level reader + + readDecompress bool // whether last read frame had RSV1 set + newDecompressionReader func(io.Reader) io.ReadCloser +} + +func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, writeBufferPool BufferPool, br *bufio.Reader, writeBuf []byte) *Conn { + + if br == nil { + if readBufferSize == 0 { + readBufferSize = defaultReadBufferSize + } else if readBufferSize < maxControlFramePayloadSize { + // must be large enough for control frame + readBufferSize = maxControlFramePayloadSize + } + br = bufio.NewReaderSize(conn, readBufferSize) + } + + if writeBufferSize <= 0 { + writeBufferSize = defaultWriteBufferSize + } + writeBufferSize += maxFrameHeaderSize + + if writeBuf == nil && writeBufferPool == nil { + writeBuf = make([]byte, writeBufferSize) + } + + mu := make(chan struct{}, 1) + mu <- struct{}{} + c := &Conn{ + isServer: isServer, + br: br, + conn: conn, + mu: mu, + readFinal: true, + writeBuf: writeBuf, + writePool: writeBufferPool, + writeBufSize: writeBufferSize, + enableWriteCompression: true, + compressionLevel: defaultCompressionLevel, + } + c.SetCloseHandler(nil) + c.SetPingHandler(nil) + c.SetPongHandler(nil) + return c +} + +// setReadRemaining tracks the number of bytes remaining on the connection. If n +// overflows, an ErrReadLimit is returned. +func (c *Conn) setReadRemaining(n int64) error { + if n < 0 { + return ErrReadLimit + } + + c.readRemaining = n + return nil +} + +// Subprotocol returns the negotiated protocol for the connection. +func (c *Conn) Subprotocol() string { + return c.subprotocol +} + +// Close closes the underlying network connection without sending or waiting +// for a close message. +func (c *Conn) Close() error { + return c.conn.Close() +} + +// LocalAddr returns the local network address. +func (c *Conn) LocalAddr() net.Addr { + return c.conn.LocalAddr() +} + +// RemoteAddr returns the remote network address. 
+func (c *Conn) RemoteAddr() net.Addr { + return c.conn.RemoteAddr() +} + +// Write methods + +func (c *Conn) writeFatal(err error) error { + err = hideTempErr(err) + c.writeErrMu.Lock() + if c.writeErr == nil { + c.writeErr = err + } + c.writeErrMu.Unlock() + return err +} + +func (c *Conn) read(n int) ([]byte, error) { + p, err := c.br.Peek(n) + if err == io.EOF { + err = errUnexpectedEOF + } + c.br.Discard(len(p)) + return p, err +} + +func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error { + <-c.mu + defer func() { c.mu <- struct{}{} }() + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err + } + + c.conn.SetWriteDeadline(deadline) + if len(buf1) == 0 { + _, err = c.conn.Write(buf0) + } else { + err = c.writeBufs(buf0, buf1) + } + if err != nil { + return c.writeFatal(err) + } + if frameType == CloseMessage { + c.writeFatal(ErrCloseSent) + } + return nil +} + +func (c *Conn) writeBufs(bufs ...[]byte) error { + b := net.Buffers(bufs) + _, err := b.WriteTo(c.conn) + return err +} + +// WriteControl writes a control message with the given deadline. The allowed +// message types are CloseMessage, PingMessage and PongMessage. +func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error { + if !isControl(messageType) { + return errBadWriteOpCode + } + if len(data) > maxControlFramePayloadSize { + return errInvalidControlFrame + } + + b0 := byte(messageType) | finalBit + b1 := byte(len(data)) + if !c.isServer { + b1 |= maskBit + } + + buf := make([]byte, 0, maxFrameHeaderSize+maxControlFramePayloadSize) + buf = append(buf, b0, b1) + + if c.isServer { + buf = append(buf, data...) + } else { + key := newMaskKey() + buf = append(buf, key[:]...) + buf = append(buf, data...) + maskBytes(key, 0, buf[6:]) + } + + d := 1000 * time.Hour + if !deadline.IsZero() { + d = deadline.Sub(time.Now()) + if d < 0 { + return errWriteTimeout + } + } + + timer := time.NewTimer(d) + select { + case <-c.mu: + timer.Stop() + case <-timer.C: + return errWriteTimeout + } + defer func() { c.mu <- struct{}{} }() + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err + } + + c.conn.SetWriteDeadline(deadline) + _, err = c.conn.Write(buf) + if err != nil { + return c.writeFatal(err) + } + if messageType == CloseMessage { + c.writeFatal(ErrCloseSent) + } + return err +} + +// beginMessage prepares a connection and message writer for a new message. +func (c *Conn) beginMessage(mw *messageWriter, messageType int) error { + // Close previous writer if not already closed by the application. It's + // probably better to return an error in this situation, but we cannot + // change this without breaking existing applications. + if c.writer != nil { + c.writer.Close() + c.writer = nil + } + + if !isControl(messageType) && !isData(messageType) { + return errBadWriteOpCode + } + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err + } + + mw.c = c + mw.frameType = messageType + mw.pos = maxFrameHeaderSize + + if c.writeBuf == nil { + wpd, ok := c.writePool.Get().(writePoolData) + if ok { + c.writeBuf = wpd.buf + } else { + c.writeBuf = make([]byte, c.writeBufSize) + } + } + return nil +} + +// NextWriter returns a writer for the next message to send. The writer's Close +// method flushes the complete message to the network. +// +// There can be at most one open writer on a connection. 
NextWriter closes the +// previous writer if the application has not already done so. +// +// All message types (TextMessage, BinaryMessage, CloseMessage, PingMessage and +// PongMessage) are supported. +func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) { + var mw messageWriter + if err := c.beginMessage(&mw, messageType); err != nil { + return nil, err + } + c.writer = &mw + if c.newCompressionWriter != nil && c.enableWriteCompression && isData(messageType) { + w := c.newCompressionWriter(c.writer, c.compressionLevel) + mw.compress = true + c.writer = w + } + return c.writer, nil +} + +type messageWriter struct { + c *Conn + compress bool // whether next call to flushFrame should set RSV1 + pos int // end of data in writeBuf. + frameType int // type of the current frame. + err error +} + +func (w *messageWriter) endMessage(err error) error { + if w.err != nil { + return err + } + c := w.c + w.err = err + c.writer = nil + if c.writePool != nil { + c.writePool.Put(writePoolData{buf: c.writeBuf}) + c.writeBuf = nil + } + return err +} + +// flushFrame writes buffered data and extra as a frame to the network. The +// final argument indicates that this is the last frame in the message. +func (w *messageWriter) flushFrame(final bool, extra []byte) error { + c := w.c + length := w.pos - maxFrameHeaderSize + len(extra) + + // Check for invalid control frames. + if isControl(w.frameType) && + (!final || length > maxControlFramePayloadSize) { + return w.endMessage(errInvalidControlFrame) + } + + b0 := byte(w.frameType) + if final { + b0 |= finalBit + } + if w.compress { + b0 |= rsv1Bit + } + w.compress = false + + b1 := byte(0) + if !c.isServer { + b1 |= maskBit + } + + // Assume that the frame starts at beginning of c.writeBuf. + framePos := 0 + if c.isServer { + // Adjust up if mask not included in the header. + framePos = 4 + } + + switch { + case length >= 65536: + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | 127 + binary.BigEndian.PutUint64(c.writeBuf[framePos+2:], uint64(length)) + case length > 125: + framePos += 6 + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | 126 + binary.BigEndian.PutUint16(c.writeBuf[framePos+2:], uint16(length)) + default: + framePos += 8 + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | byte(length) + } + + if !c.isServer { + key := newMaskKey() + copy(c.writeBuf[maxFrameHeaderSize-4:], key[:]) + maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:w.pos]) + if len(extra) > 0 { + return w.endMessage(c.writeFatal(errors.New("websocket: internal error, extra used in client mode"))) + } + } + + // Write the buffers to the connection with best-effort detection of + // concurrent writes. See the concurrency section in the package + // documentation for more info. + + if c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = true + + err := c.write(w.frameType, c.writeDeadline, c.writeBuf[framePos:w.pos], extra) + + if !c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = false + + if err != nil { + return w.endMessage(err) + } + + if final { + w.endMessage(errWriteClosed) + return nil + } + + // Setup for next frame. 
+ w.pos = maxFrameHeaderSize + w.frameType = continuationFrame + return nil +} + +func (w *messageWriter) ncopy(max int) (int, error) { + n := len(w.c.writeBuf) - w.pos + if n <= 0 { + if err := w.flushFrame(false, nil); err != nil { + return 0, err + } + n = len(w.c.writeBuf) - w.pos + } + if n > max { + n = max + } + return n, nil +} + +func (w *messageWriter) Write(p []byte) (int, error) { + if w.err != nil { + return 0, w.err + } + + if len(p) > 2*len(w.c.writeBuf) && w.c.isServer { + // Don't buffer large messages. + err := w.flushFrame(false, p) + if err != nil { + return 0, err + } + return len(p), nil + } + + nn := len(p) + for len(p) > 0 { + n, err := w.ncopy(len(p)) + if err != nil { + return 0, err + } + copy(w.c.writeBuf[w.pos:], p[:n]) + w.pos += n + p = p[n:] + } + return nn, nil +} + +func (w *messageWriter) WriteString(p string) (int, error) { + if w.err != nil { + return 0, w.err + } + + nn := len(p) + for len(p) > 0 { + n, err := w.ncopy(len(p)) + if err != nil { + return 0, err + } + copy(w.c.writeBuf[w.pos:], p[:n]) + w.pos += n + p = p[n:] + } + return nn, nil +} + +func (w *messageWriter) ReadFrom(r io.Reader) (nn int64, err error) { + if w.err != nil { + return 0, w.err + } + for { + if w.pos == len(w.c.writeBuf) { + err = w.flushFrame(false, nil) + if err != nil { + break + } + } + var n int + n, err = r.Read(w.c.writeBuf[w.pos:]) + w.pos += n + nn += int64(n) + if err != nil { + if err == io.EOF { + err = nil + } + break + } + } + return nn, err +} + +func (w *messageWriter) Close() error { + if w.err != nil { + return w.err + } + return w.flushFrame(true, nil) +} + +// WritePreparedMessage writes prepared message into connection. +func (c *Conn) WritePreparedMessage(pm *PreparedMessage) error { + frameType, frameData, err := pm.frame(prepareKey{ + isServer: c.isServer, + compress: c.newCompressionWriter != nil && c.enableWriteCompression && isData(pm.messageType), + compressionLevel: c.compressionLevel, + }) + if err != nil { + return err + } + if c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = true + err = c.write(frameType, c.writeDeadline, frameData, nil) + if !c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = false + return err +} + +// WriteMessage is a helper method for getting a writer using NextWriter, +// writing the message and closing the writer. +func (c *Conn) WriteMessage(messageType int, data []byte) error { + + if c.isServer && (c.newCompressionWriter == nil || !c.enableWriteCompression) { + // Fast path with no allocations and single frame. + + var mw messageWriter + if err := c.beginMessage(&mw, messageType); err != nil { + return err + } + n := copy(c.writeBuf[mw.pos:], data) + mw.pos += n + data = data[n:] + return mw.flushFrame(true, data) + } + + w, err := c.NextWriter(messageType) + if err != nil { + return err + } + if _, err = w.Write(data); err != nil { + return err + } + return w.Close() +} + +// SetWriteDeadline sets the write deadline on the underlying network +// connection. After a write has timed out, the websocket state is corrupt and +// all future writes will return an error. A zero value for t means writes will +// not time out. +func (c *Conn) SetWriteDeadline(t time.Time) error { + c.writeDeadline = t + return nil +} + +// Read methods + +func (c *Conn) advanceFrame() (int, error) { + // 1. Skip remainder of previous frame. 
+ + if c.readRemaining > 0 { + if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil { + return noFrame, err + } + } + + // 2. Read and parse first two bytes of frame header. + // To aid debugging, collect and report all errors in the first two bytes + // of the header. + + var errors []string + + p, err := c.read(2) + if err != nil { + return noFrame, err + } + + frameType := int(p[0] & 0xf) + final := p[0]&finalBit != 0 + rsv1 := p[0]&rsv1Bit != 0 + rsv2 := p[0]&rsv2Bit != 0 + rsv3 := p[0]&rsv3Bit != 0 + mask := p[1]&maskBit != 0 + c.setReadRemaining(int64(p[1] & 0x7f)) + + c.readDecompress = false + if rsv1 { + if c.newDecompressionReader != nil { + c.readDecompress = true + } else { + errors = append(errors, "RSV1 set") + } + } + + if rsv2 { + errors = append(errors, "RSV2 set") + } + + if rsv3 { + errors = append(errors, "RSV3 set") + } + + switch frameType { + case CloseMessage, PingMessage, PongMessage: + if c.readRemaining > maxControlFramePayloadSize { + errors = append(errors, "len > 125 for control") + } + if !final { + errors = append(errors, "FIN not set on control") + } + case TextMessage, BinaryMessage: + if !c.readFinal { + errors = append(errors, "data before FIN") + } + c.readFinal = final + case continuationFrame: + if c.readFinal { + errors = append(errors, "continuation after FIN") + } + c.readFinal = final + default: + errors = append(errors, "bad opcode "+strconv.Itoa(frameType)) + } + + if mask != c.isServer { + errors = append(errors, "bad MASK") + } + + if len(errors) > 0 { + return noFrame, c.handleProtocolError(strings.Join(errors, ", ")) + } + + // 3. Read and parse frame length as per + // https://tools.ietf.org/html/rfc6455#section-5.2 + // + // The length of the "Payload data", in bytes: if 0-125, that is the payload + // length. + // - If 126, the following 2 bytes interpreted as a 16-bit unsigned + // integer are the payload length. + // - If 127, the following 8 bytes interpreted as + // a 64-bit unsigned integer (the most significant bit MUST be 0) are the + // payload length. Multibyte length quantities are expressed in network byte + // order. + + switch c.readRemaining { + case 126: + p, err := c.read(2) + if err != nil { + return noFrame, err + } + + if err := c.setReadRemaining(int64(binary.BigEndian.Uint16(p))); err != nil { + return noFrame, err + } + case 127: + p, err := c.read(8) + if err != nil { + return noFrame, err + } + + if err := c.setReadRemaining(int64(binary.BigEndian.Uint64(p))); err != nil { + return noFrame, err + } + } + + // 4. Handle frame masking. + + if mask { + c.readMaskPos = 0 + p, err := c.read(len(c.readMaskKey)) + if err != nil { + return noFrame, err + } + copy(c.readMaskKey[:], p) + } + + // 5. For text and binary messages, enforce read limit and return. + + if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage { + + c.readLength += c.readRemaining + // Don't allow readLength to overflow in the presence of a large readRemaining + // counter. + if c.readLength < 0 { + return noFrame, ErrReadLimit + } + + if c.readLimit > 0 && c.readLength > c.readLimit { + c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)) + return noFrame, ErrReadLimit + } + + return frameType, nil + } + + // 6. Read control frame payload. 
+ + var payload []byte + if c.readRemaining > 0 { + payload, err = c.read(int(c.readRemaining)) + c.setReadRemaining(0) + if err != nil { + return noFrame, err + } + if c.isServer { + maskBytes(c.readMaskKey, 0, payload) + } + } + + // 7. Process control frame payload. + + switch frameType { + case PongMessage: + if err := c.handlePong(string(payload)); err != nil { + return noFrame, err + } + case PingMessage: + if err := c.handlePing(string(payload)); err != nil { + return noFrame, err + } + case CloseMessage: + closeCode := CloseNoStatusReceived + closeText := "" + if len(payload) >= 2 { + closeCode = int(binary.BigEndian.Uint16(payload)) + if !isValidReceivedCloseCode(closeCode) { + return noFrame, c.handleProtocolError("bad close code " + strconv.Itoa(closeCode)) + } + closeText = string(payload[2:]) + if !utf8.ValidString(closeText) { + return noFrame, c.handleProtocolError("invalid utf8 payload in close frame") + } + } + if err := c.handleClose(closeCode, closeText); err != nil { + return noFrame, err + } + return noFrame, &CloseError{Code: closeCode, Text: closeText} + } + + return frameType, nil +} + +func (c *Conn) handleProtocolError(message string) error { + data := FormatCloseMessage(CloseProtocolError, message) + if len(data) > maxControlFramePayloadSize { + data = data[:maxControlFramePayloadSize] + } + c.WriteControl(CloseMessage, data, time.Now().Add(writeWait)) + return errors.New("websocket: " + message) +} + +// NextReader returns the next data message received from the peer. The +// returned messageType is either TextMessage or BinaryMessage. +// +// There can be at most one open reader on a connection. NextReader discards +// the previous message if the application has not already consumed it. +// +// Applications must break out of the application's read loop when this method +// returns a non-nil error value. Errors returned from this method are +// permanent. Once this method returns a non-nil error, all subsequent calls to +// this method return the same error. +func (c *Conn) NextReader() (messageType int, r io.Reader, err error) { + // Close previous reader, only relevant for decompression. + if c.reader != nil { + c.reader.Close() + c.reader = nil + } + + c.messageReader = nil + c.readLength = 0 + + for c.readErr == nil { + frameType, err := c.advanceFrame() + if err != nil { + c.readErr = hideTempErr(err) + break + } + + if frameType == TextMessage || frameType == BinaryMessage { + c.messageReader = &messageReader{c} + c.reader = c.messageReader + if c.readDecompress { + c.reader = c.newDecompressionReader(c.reader) + } + return frameType, c.reader, nil + } + } + + // Applications that do handle the error returned from this method spin in + // tight loop on connection failure. To help application developers detect + // this error, panic on repeated reads to the failed connection. 
+ c.readErrCount++ + if c.readErrCount >= 1000 { + panic("repeated read on failed websocket connection") + } + + return noFrame, nil, c.readErr +} + +type messageReader struct{ c *Conn } + +func (r *messageReader) Read(b []byte) (int, error) { + c := r.c + if c.messageReader != r { + return 0, io.EOF + } + + for c.readErr == nil { + + if c.readRemaining > 0 { + if int64(len(b)) > c.readRemaining { + b = b[:c.readRemaining] + } + n, err := c.br.Read(b) + c.readErr = hideTempErr(err) + if c.isServer { + c.readMaskPos = maskBytes(c.readMaskKey, c.readMaskPos, b[:n]) + } + rem := c.readRemaining + rem -= int64(n) + c.setReadRemaining(rem) + if c.readRemaining > 0 && c.readErr == io.EOF { + c.readErr = errUnexpectedEOF + } + return n, c.readErr + } + + if c.readFinal { + c.messageReader = nil + return 0, io.EOF + } + + frameType, err := c.advanceFrame() + switch { + case err != nil: + c.readErr = hideTempErr(err) + case frameType == TextMessage || frameType == BinaryMessage: + c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader") + } + } + + err := c.readErr + if err == io.EOF && c.messageReader == r { + err = errUnexpectedEOF + } + return 0, err +} + +func (r *messageReader) Close() error { + return nil +} + +// ReadMessage is a helper method for getting a reader using NextReader and +// reading from that reader to a buffer. +func (c *Conn) ReadMessage() (messageType int, p []byte, err error) { + var r io.Reader + messageType, r, err = c.NextReader() + if err != nil { + return messageType, nil, err + } + p, err = ioutil.ReadAll(r) + return messageType, p, err +} + +// SetReadDeadline sets the read deadline on the underlying network connection. +// After a read has timed out, the websocket connection state is corrupt and +// all future reads will return an error. A zero value for t means reads will +// not time out. +func (c *Conn) SetReadDeadline(t time.Time) error { + return c.conn.SetReadDeadline(t) +} + +// SetReadLimit sets the maximum size in bytes for a message read from the peer. If a +// message exceeds the limit, the connection sends a close message to the peer +// and returns ErrReadLimit to the application. +func (c *Conn) SetReadLimit(limit int64) { + c.readLimit = limit +} + +// CloseHandler returns the current close handler +func (c *Conn) CloseHandler() func(code int, text string) error { + return c.handleClose +} + +// SetCloseHandler sets the handler for close messages received from the peer. +// The code argument to h is the received close code or CloseNoStatusReceived +// if the close message is empty. The default close handler sends a close +// message back to the peer. +// +// The handler function is called from the NextReader, ReadMessage and message +// reader Read methods. The application must read the connection to process +// close messages as described in the section on Control Messages above. +// +// The connection read methods return a CloseError when a close message is +// received. Most applications should handle close messages as part of their +// normal error handling. Applications should only set a close handler when the +// application must perform some action before sending a close message back to +// the peer. 
+func (c *Conn) SetCloseHandler(h func(code int, text string) error) { + if h == nil { + h = func(code int, text string) error { + message := FormatCloseMessage(code, "") + c.WriteControl(CloseMessage, message, time.Now().Add(writeWait)) + return nil + } + } + c.handleClose = h +} + +// PingHandler returns the current ping handler +func (c *Conn) PingHandler() func(appData string) error { + return c.handlePing +} + +// SetPingHandler sets the handler for ping messages received from the peer. +// The appData argument to h is the PING message application data. The default +// ping handler sends a pong to the peer. +// +// The handler function is called from the NextReader, ReadMessage and message +// reader Read methods. The application must read the connection to process +// ping messages as described in the section on Control Messages above. +func (c *Conn) SetPingHandler(h func(appData string) error) { + if h == nil { + h = func(message string) error { + err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait)) + if err == ErrCloseSent { + return nil + } else if e, ok := err.(net.Error); ok && e.Temporary() { + return nil + } + return err + } + } + c.handlePing = h +} + +// PongHandler returns the current pong handler +func (c *Conn) PongHandler() func(appData string) error { + return c.handlePong +} + +// SetPongHandler sets the handler for pong messages received from the peer. +// The appData argument to h is the PONG message application data. The default +// pong handler does nothing. +// +// The handler function is called from the NextReader, ReadMessage and message +// reader Read methods. The application must read the connection to process +// pong messages as described in the section on Control Messages above. +func (c *Conn) SetPongHandler(h func(appData string) error) { + if h == nil { + h = func(string) error { return nil } + } + c.handlePong = h +} + +// UnderlyingConn returns the internal net.Conn. This can be used to further +// modifications to connection specific flags. +func (c *Conn) UnderlyingConn() net.Conn { + return c.conn +} + +// EnableWriteCompression enables and disables write compression of +// subsequent text and binary messages. This function is a noop if +// compression was not negotiated with the peer. +func (c *Conn) EnableWriteCompression(enable bool) { + c.enableWriteCompression = enable +} + +// SetCompressionLevel sets the flate compression level for subsequent text and +// binary messages. This function is a noop if compression was not negotiated +// with the peer. See the compress/flate package for a description of +// compression levels. +func (c *Conn) SetCompressionLevel(level int) error { + if !isValidCompressionLevel(level) { + return errors.New("websocket: invalid compression level") + } + c.compressionLevel = level + return nil +} + +// FormatCloseMessage formats closeCode and text as a WebSocket close message. +// An empty message is returned for code CloseNoStatusReceived. +func FormatCloseMessage(closeCode int, text string) []byte { + if closeCode == CloseNoStatusReceived { + // Return empty message because it's illegal to send + // CloseNoStatusReceived. Return non-nil value in case application + // checks for nil. 
+ return []byte{} + } + buf := make([]byte, 2+len(text)) + binary.BigEndian.PutUint16(buf, uint16(closeCode)) + copy(buf[2:], text) + return buf +} diff --git a/vendor/github.com/gorilla/websocket/doc.go b/vendor/github.com/gorilla/websocket/doc.go new file mode 100644 index 0000000000..8db0cef95a --- /dev/null +++ b/vendor/github.com/gorilla/websocket/doc.go @@ -0,0 +1,227 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package websocket implements the WebSocket protocol defined in RFC 6455. +// +// Overview +// +// The Conn type represents a WebSocket connection. A server application calls +// the Upgrader.Upgrade method from an HTTP request handler to get a *Conn: +// +// var upgrader = websocket.Upgrader{ +// ReadBufferSize: 1024, +// WriteBufferSize: 1024, +// } +// +// func handler(w http.ResponseWriter, r *http.Request) { +// conn, err := upgrader.Upgrade(w, r, nil) +// if err != nil { +// log.Println(err) +// return +// } +// ... Use conn to send and receive messages. +// } +// +// Call the connection's WriteMessage and ReadMessage methods to send and +// receive messages as a slice of bytes. This snippet of code shows how to echo +// messages using these methods: +// +// for { +// messageType, p, err := conn.ReadMessage() +// if err != nil { +// log.Println(err) +// return +// } +// if err := conn.WriteMessage(messageType, p); err != nil { +// log.Println(err) +// return +// } +// } +// +// In above snippet of code, p is a []byte and messageType is an int with value +// websocket.BinaryMessage or websocket.TextMessage. +// +// An application can also send and receive messages using the io.WriteCloser +// and io.Reader interfaces. To send a message, call the connection NextWriter +// method to get an io.WriteCloser, write the message to the writer and close +// the writer when done. To receive a message, call the connection NextReader +// method to get an io.Reader and read until io.EOF is returned. This snippet +// shows how to echo messages using the NextWriter and NextReader methods: +// +// for { +// messageType, r, err := conn.NextReader() +// if err != nil { +// return +// } +// w, err := conn.NextWriter(messageType) +// if err != nil { +// return err +// } +// if _, err := io.Copy(w, r); err != nil { +// return err +// } +// if err := w.Close(); err != nil { +// return err +// } +// } +// +// Data Messages +// +// The WebSocket protocol distinguishes between text and binary data messages. +// Text messages are interpreted as UTF-8 encoded text. The interpretation of +// binary messages is left to the application. +// +// This package uses the TextMessage and BinaryMessage integer constants to +// identify the two data message types. The ReadMessage and NextReader methods +// return the type of the received message. The messageType argument to the +// WriteMessage and NextWriter methods specifies the type of a sent message. +// +// It is the application's responsibility to ensure that text messages are +// valid UTF-8 encoded text. +// +// Control Messages +// +// The WebSocket protocol defines three types of control messages: close, ping +// and pong. Call the connection WriteControl, WriteMessage or NextWriter +// methods to send a control message to the peer. 
+// +// Connections handle received close messages by calling the handler function +// set with the SetCloseHandler method and by returning a *CloseError from the +// NextReader, ReadMessage or the message Read method. The default close +// handler sends a close message to the peer. +// +// Connections handle received ping messages by calling the handler function +// set with the SetPingHandler method. The default ping handler sends a pong +// message to the peer. +// +// Connections handle received pong messages by calling the handler function +// set with the SetPongHandler method. The default pong handler does nothing. +// If an application sends ping messages, then the application should set a +// pong handler to receive the corresponding pong. +// +// The control message handler functions are called from the NextReader, +// ReadMessage and message reader Read methods. The default close and ping +// handlers can block these methods for a short time when the handler writes to +// the connection. +// +// The application must read the connection to process close, ping and pong +// messages sent from the peer. If the application is not otherwise interested +// in messages from the peer, then the application should start a goroutine to +// read and discard messages from the peer. A simple example is: +// +// func readLoop(c *websocket.Conn) { +// for { +// if _, _, err := c.NextReader(); err != nil { +// c.Close() +// break +// } +// } +// } +// +// Concurrency +// +// Connections support one concurrent reader and one concurrent writer. +// +// Applications are responsible for ensuring that no more than one goroutine +// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage, +// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and +// that no more than one goroutine calls the read methods (NextReader, +// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler) +// concurrently. +// +// The Close and WriteControl methods can be called concurrently with all other +// methods. +// +// Origin Considerations +// +// Web browsers allow Javascript applications to open a WebSocket connection to +// any host. It's up to the server to enforce an origin policy using the Origin +// request header sent by the browser. +// +// The Upgrader calls the function specified in the CheckOrigin field to check +// the origin. If the CheckOrigin function returns false, then the Upgrade +// method fails the WebSocket handshake with HTTP status 403. +// +// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail +// the handshake if the Origin request header is present and the Origin host is +// not equal to the Host request header. +// +// The deprecated package-level Upgrade function does not perform origin +// checking. The application is responsible for checking the Origin header +// before calling the Upgrade function. +// +// Buffers +// +// Connections buffer network input and output to reduce the number +// of system calls when reading or writing messages. +// +// Write buffers are also used for constructing WebSocket frames. See RFC 6455, +// Section 5 for a discussion of message framing. A WebSocket frame header is +// written to the network each time a write buffer is flushed to the network. +// Decreasing the size of the write buffer can increase the amount of framing +// overhead on the connection. +// +// The buffer sizes in bytes are specified by the ReadBufferSize and +// WriteBufferSize fields in the Dialer and Upgrader. 
The Dialer uses a default +// size of 4096 when a buffer size field is set to zero. The Upgrader reuses +// buffers created by the HTTP server when a buffer size field is set to zero. +// The HTTP server buffers have a size of 4096 at the time of this writing. +// +// The buffer sizes do not limit the size of a message that can be read or +// written by a connection. +// +// Buffers are held for the lifetime of the connection by default. If the +// Dialer or Upgrader WriteBufferPool field is set, then a connection holds the +// write buffer only when writing a message. +// +// Applications should tune the buffer sizes to balance memory use and +// performance. Increasing the buffer size uses more memory, but can reduce the +// number of system calls to read or write the network. In the case of writing, +// increasing the buffer size can reduce the number of frame headers written to +// the network. +// +// Some guidelines for setting buffer parameters are: +// +// Limit the buffer sizes to the maximum expected message size. Buffers larger +// than the largest message do not provide any benefit. +// +// Depending on the distribution of message sizes, setting the buffer size to +// a value less than the maximum expected message size can greatly reduce memory +// use with a small impact on performance. Here's an example: If 99% of the +// messages are smaller than 256 bytes and the maximum message size is 512 +// bytes, then a buffer size of 256 bytes will result in 1.01 more system calls +// than a buffer size of 512 bytes. The memory savings is 50%. +// +// A write buffer pool is useful when the application has a modest number +// writes over a large number of connections. when buffers are pooled, a larger +// buffer size has a reduced impact on total memory use and has the benefit of +// reducing system calls and frame overhead. +// +// Compression EXPERIMENTAL +// +// Per message compression extensions (RFC 7692) are experimentally supported +// by this package in a limited capacity. Setting the EnableCompression option +// to true in Dialer or Upgrader will attempt to negotiate per message deflate +// support. +// +// var upgrader = websocket.Upgrader{ +// EnableCompression: true, +// } +// +// If compression was successfully negotiated with the connection's peer, any +// message received in compressed form will be automatically decompressed. +// All Read methods will return uncompressed bytes. +// +// Per message compression of messages written to a connection can be enabled +// or disabled by calling the corresponding Conn method: +// +// conn.EnableWriteCompression(false) +// +// Currently this package does not support compression with "context takeover". +// This means that messages must be compressed and decompressed in isolation, +// without retaining sliding window or dictionary state across messages. For +// more details refer to RFC 7692. +// +// Use of compression is experimental and may result in decreased performance. +package websocket diff --git a/vendor/github.com/gorilla/websocket/join.go b/vendor/github.com/gorilla/websocket/join.go new file mode 100644 index 0000000000..c64f8c8290 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/join.go @@ -0,0 +1,42 @@ +// Copyright 2019 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "io" + "strings" +) + +// JoinMessages concatenates received messages to create a single io.Reader. 
+// The string term is appended to each message. The returned reader does not +// support concurrent calls to the Read method. +func JoinMessages(c *Conn, term string) io.Reader { + return &joinReader{c: c, term: term} +} + +type joinReader struct { + c *Conn + term string + r io.Reader +} + +func (r *joinReader) Read(p []byte) (int, error) { + if r.r == nil { + var err error + _, r.r, err = r.c.NextReader() + if err != nil { + return 0, err + } + if r.term != "" { + r.r = io.MultiReader(r.r, strings.NewReader(r.term)) + } + } + n, err := r.r.Read(p) + if err == io.EOF { + err = nil + r.r = nil + } + return n, err +} diff --git a/vendor/github.com/gorilla/websocket/json.go b/vendor/github.com/gorilla/websocket/json.go new file mode 100644 index 0000000000..dc2c1f6415 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/json.go @@ -0,0 +1,60 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "encoding/json" + "io" +) + +// WriteJSON writes the JSON encoding of v as a message. +// +// Deprecated: Use c.WriteJSON instead. +func WriteJSON(c *Conn, v interface{}) error { + return c.WriteJSON(v) +} + +// WriteJSON writes the JSON encoding of v as a message. +// +// See the documentation for encoding/json Marshal for details about the +// conversion of Go values to JSON. +func (c *Conn) WriteJSON(v interface{}) error { + w, err := c.NextWriter(TextMessage) + if err != nil { + return err + } + err1 := json.NewEncoder(w).Encode(v) + err2 := w.Close() + if err1 != nil { + return err1 + } + return err2 +} + +// ReadJSON reads the next JSON-encoded message from the connection and stores +// it in the value pointed to by v. +// +// Deprecated: Use c.ReadJSON instead. +func ReadJSON(c *Conn, v interface{}) error { + return c.ReadJSON(v) +} + +// ReadJSON reads the next JSON-encoded message from the connection and stores +// it in the value pointed to by v. +// +// See the documentation for the encoding/json Unmarshal function for details +// about the conversion of JSON to a Go value. +func (c *Conn) ReadJSON(v interface{}) error { + _, r, err := c.NextReader() + if err != nil { + return err + } + err = json.NewDecoder(r).Decode(v) + if err == io.EOF { + // One value is expected in the message. + err = io.ErrUnexpectedEOF + } + return err +} diff --git a/vendor/github.com/gorilla/websocket/mask.go b/vendor/github.com/gorilla/websocket/mask.go new file mode 100644 index 0000000000..d0742bf2a5 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/mask.go @@ -0,0 +1,55 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of +// this source code is governed by a BSD-style license that can be found in the +// LICENSE file. + +//go:build !appengine +// +build !appengine + +package websocket + +import "unsafe" + +const wordSize = int(unsafe.Sizeof(uintptr(0))) + +func maskBytes(key [4]byte, pos int, b []byte) int { + // Mask one byte at a time for small buffers. + if len(b) < 2*wordSize { + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + return pos & 3 + } + + // Mask one byte at a time to word boundary. + if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 { + n = wordSize - n + for i := range b[:n] { + b[i] ^= key[pos&3] + pos++ + } + b = b[n:] + } + + // Create aligned word size key. 
+ var k [wordSize]byte + for i := range k { + k[i] = key[(pos+i)&3] + } + kw := *(*uintptr)(unsafe.Pointer(&k)) + + // Mask one word at a time. + n := (len(b) / wordSize) * wordSize + for i := 0; i < n; i += wordSize { + *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw + } + + // Mask one byte at a time for remaining bytes. + b = b[n:] + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + + return pos & 3 +} diff --git a/vendor/github.com/gorilla/websocket/mask_safe.go b/vendor/github.com/gorilla/websocket/mask_safe.go new file mode 100644 index 0000000000..36250ca7c4 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/mask_safe.go @@ -0,0 +1,16 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of +// this source code is governed by a BSD-style license that can be found in the +// LICENSE file. + +//go:build appengine +// +build appengine + +package websocket + +func maskBytes(key [4]byte, pos int, b []byte) int { + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + return pos & 3 +} diff --git a/vendor/github.com/gorilla/websocket/prepared.go b/vendor/github.com/gorilla/websocket/prepared.go new file mode 100644 index 0000000000..c854225e96 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/prepared.go @@ -0,0 +1,102 @@ +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bytes" + "net" + "sync" + "time" +) + +// PreparedMessage caches on the wire representations of a message payload. +// Use PreparedMessage to efficiently send a message payload to multiple +// connections. PreparedMessage is especially useful when compression is used +// because the CPU and memory expensive compression operation can be executed +// once for a given set of compression options. +type PreparedMessage struct { + messageType int + data []byte + mu sync.Mutex + frames map[prepareKey]*preparedFrame +} + +// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage. +type prepareKey struct { + isServer bool + compress bool + compressionLevel int +} + +// preparedFrame contains data in wire representation. +type preparedFrame struct { + once sync.Once + data []byte +} + +// NewPreparedMessage returns an initialized PreparedMessage. You can then send +// it to connection using WritePreparedMessage method. Valid wire +// representation will be calculated lazily only once for a set of current +// connection options. +func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) { + pm := &PreparedMessage{ + messageType: messageType, + frames: make(map[prepareKey]*preparedFrame), + data: data, + } + + // Prepare a plain server frame. + _, frameData, err := pm.frame(prepareKey{isServer: true, compress: false}) + if err != nil { + return nil, err + } + + // To protect against caller modifying the data argument, remember the data + // copied to the plain server frame. + pm.data = frameData[len(frameData)-len(data):] + return pm, nil +} + +func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) { + pm.mu.Lock() + frame, ok := pm.frames[key] + if !ok { + frame = &preparedFrame{} + pm.frames[key] = frame + } + pm.mu.Unlock() + + var err error + frame.once.Do(func() { + // Prepare a frame using a 'fake' connection. + // TODO: Refactor code in conn.go to allow more direct construction of + // the frame. 
+ mu := make(chan struct{}, 1) + mu <- struct{}{} + var nc prepareConn + c := &Conn{ + conn: &nc, + mu: mu, + isServer: key.isServer, + compressionLevel: key.compressionLevel, + enableWriteCompression: true, + writeBuf: make([]byte, defaultWriteBufferSize+maxFrameHeaderSize), + } + if key.compress { + c.newCompressionWriter = compressNoContextTakeover + } + err = c.WriteMessage(pm.messageType, pm.data) + frame.data = nc.buf.Bytes() + }) + return pm.messageType, frame.data, err +} + +type prepareConn struct { + buf bytes.Buffer + net.Conn +} + +func (pc *prepareConn) Write(p []byte) (int, error) { return pc.buf.Write(p) } +func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil } diff --git a/vendor/github.com/gorilla/websocket/proxy.go b/vendor/github.com/gorilla/websocket/proxy.go new file mode 100644 index 0000000000..e0f466b72f --- /dev/null +++ b/vendor/github.com/gorilla/websocket/proxy.go @@ -0,0 +1,77 @@ +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "encoding/base64" + "errors" + "net" + "net/http" + "net/url" + "strings" +) + +type netDialerFunc func(network, addr string) (net.Conn, error) + +func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) { + return fn(network, addr) +} + +func init() { + proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) { + return &httpProxyDialer{proxyURL: proxyURL, forwardDial: forwardDialer.Dial}, nil + }) +} + +type httpProxyDialer struct { + proxyURL *url.URL + forwardDial func(network, addr string) (net.Conn, error) +} + +func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) { + hostPort, _ := hostPortNoPort(hpd.proxyURL) + conn, err := hpd.forwardDial(network, hostPort) + if err != nil { + return nil, err + } + + connectHeader := make(http.Header) + if user := hpd.proxyURL.User; user != nil { + proxyUser := user.Username() + if proxyPassword, passwordSet := user.Password(); passwordSet { + credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword)) + connectHeader.Set("Proxy-Authorization", "Basic "+credential) + } + } + + connectReq := &http.Request{ + Method: http.MethodConnect, + URL: &url.URL{Opaque: addr}, + Host: addr, + Header: connectHeader, + } + + if err := connectReq.Write(conn); err != nil { + conn.Close() + return nil, err + } + + // Read response. It's OK to use and discard buffered reader here becaue + // the remote server does not speak until spoken to. + br := bufio.NewReader(conn) + resp, err := http.ReadResponse(br, connectReq) + if err != nil { + conn.Close() + return nil, err + } + + if resp.StatusCode != 200 { + conn.Close() + f := strings.SplitN(resp.Status, " ", 2) + return nil, errors.New(f[1]) + } + return conn, nil +} diff --git a/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/gorilla/websocket/server.go new file mode 100644 index 0000000000..24d53b38ab --- /dev/null +++ b/vendor/github.com/gorilla/websocket/server.go @@ -0,0 +1,365 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "errors" + "io" + "net/http" + "net/url" + "strings" + "time" +) + +// HandshakeError describes an error with the handshake from the peer. 
+type HandshakeError struct { + message string +} + +func (e HandshakeError) Error() string { return e.message } + +// Upgrader specifies parameters for upgrading an HTTP connection to a +// WebSocket connection. +// +// It is safe to call Upgrader's methods concurrently. +type Upgrader struct { + // HandshakeTimeout specifies the duration for the handshake to complete. + HandshakeTimeout time.Duration + + // ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer + // size is zero, then buffers allocated by the HTTP server are used. The + // I/O buffer sizes do not limit the size of the messages that can be sent + // or received. + ReadBufferSize, WriteBufferSize int + + // WriteBufferPool is a pool of buffers for write operations. If the value + // is not set, then write buffers are allocated to the connection for the + // lifetime of the connection. + // + // A pool is most useful when the application has a modest volume of writes + // across a large number of connections. + // + // Applications should use a single pool for each unique value of + // WriteBufferSize. + WriteBufferPool BufferPool + + // Subprotocols specifies the server's supported protocols in order of + // preference. If this field is not nil, then the Upgrade method negotiates a + // subprotocol by selecting the first match in this list with a protocol + // requested by the client. If there's no match, then no protocol is + // negotiated (the Sec-Websocket-Protocol header is not included in the + // handshake response). + Subprotocols []string + + // Error specifies the function for generating HTTP error responses. If Error + // is nil, then http.Error is used to generate the HTTP response. + Error func(w http.ResponseWriter, r *http.Request, status int, reason error) + + // CheckOrigin returns true if the request Origin header is acceptable. If + // CheckOrigin is nil, then a safe default is used: return false if the + // Origin request header is present and the origin host is not equal to + // request Host header. + // + // A CheckOrigin function should carefully validate the request origin to + // prevent cross-site request forgery. + CheckOrigin func(r *http.Request) bool + + // EnableCompression specify if the server should attempt to negotiate per + // message compression (RFC 7692). Setting this value to true does not + // guarantee that compression will be supported. Currently only "no context + // takeover" modes are supported. + EnableCompression bool +} + +func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) { + err := HandshakeError{reason} + if u.Error != nil { + u.Error(w, r, status, err) + } else { + w.Header().Set("Sec-Websocket-Version", "13") + http.Error(w, http.StatusText(status), status) + } + return nil, err +} + +// checkSameOrigin returns true if the origin is not set or is equal to the request host. 
+func checkSameOrigin(r *http.Request) bool { + origin := r.Header["Origin"] + if len(origin) == 0 { + return true + } + u, err := url.Parse(origin[0]) + if err != nil { + return false + } + return equalASCIIFold(u.Host, r.Host) +} + +func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string { + if u.Subprotocols != nil { + clientProtocols := Subprotocols(r) + for _, serverProtocol := range u.Subprotocols { + for _, clientProtocol := range clientProtocols { + if clientProtocol == serverProtocol { + return clientProtocol + } + } + } + } else if responseHeader != nil { + return responseHeader.Get("Sec-Websocket-Protocol") + } + return "" +} + +// Upgrade upgrades the HTTP server connection to the WebSocket protocol. +// +// The responseHeader is included in the response to the client's upgrade +// request. Use the responseHeader to specify cookies (Set-Cookie). To specify +// subprotocols supported by the server, set Upgrader.Subprotocols directly. +// +// If the upgrade fails, then Upgrade replies to the client with an HTTP error +// response. +func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) { + const badHandshake = "websocket: the client is not using the websocket protocol: " + + if !tokenListContainsValue(r.Header, "Connection", "upgrade") { + return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'upgrade' token not found in 'Connection' header") + } + + if !tokenListContainsValue(r.Header, "Upgrade", "websocket") { + return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header") + } + + if r.Method != http.MethodGet { + return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET") + } + + if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") { + return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header") + } + + if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok { + return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-WebSocket-Extensions' headers are unsupported") + } + + checkOrigin := u.CheckOrigin + if checkOrigin == nil { + checkOrigin = checkSameOrigin + } + if !checkOrigin(r) { + return u.returnError(w, r, http.StatusForbidden, "websocket: request origin not allowed by Upgrader.CheckOrigin") + } + + challengeKey := r.Header.Get("Sec-Websocket-Key") + if challengeKey == "" { + return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'Sec-WebSocket-Key' header is missing or blank") + } + + subprotocol := u.selectSubprotocol(r, responseHeader) + + // Negotiate PMCE + var compress bool + if u.EnableCompression { + for _, ext := range parseExtensions(r.Header) { + if ext[""] != "permessage-deflate" { + continue + } + compress = true + break + } + } + + h, ok := w.(http.Hijacker) + if !ok { + return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker") + } + var brw *bufio.ReadWriter + netConn, brw, err := h.Hijack() + if err != nil { + return u.returnError(w, r, http.StatusInternalServerError, err.Error()) + } + + if brw.Reader.Buffered() > 0 { + netConn.Close() + return nil, errors.New("websocket: client sent data before handshake is complete") + } + + var br *bufio.Reader + if u.ReadBufferSize == 0 && bufioReaderSize(netConn, brw.Reader) > 256 { + // Reuse hijacked buffered 
reader as connection reader. + br = brw.Reader + } + + buf := bufioWriterBuffer(netConn, brw.Writer) + + var writeBuf []byte + if u.WriteBufferPool == nil && u.WriteBufferSize == 0 && len(buf) >= maxFrameHeaderSize+256 { + // Reuse hijacked write buffer as connection buffer. + writeBuf = buf + } + + c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize, u.WriteBufferPool, br, writeBuf) + c.subprotocol = subprotocol + + if compress { + c.newCompressionWriter = compressNoContextTakeover + c.newDecompressionReader = decompressNoContextTakeover + } + + // Use larger of hijacked buffer and connection write buffer for header. + p := buf + if len(c.writeBuf) > len(p) { + p = c.writeBuf + } + p = p[:0] + + p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...) + p = append(p, computeAcceptKey(challengeKey)...) + p = append(p, "\r\n"...) + if c.subprotocol != "" { + p = append(p, "Sec-WebSocket-Protocol: "...) + p = append(p, c.subprotocol...) + p = append(p, "\r\n"...) + } + if compress { + p = append(p, "Sec-WebSocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...) + } + for k, vs := range responseHeader { + if k == "Sec-Websocket-Protocol" { + continue + } + for _, v := range vs { + p = append(p, k...) + p = append(p, ": "...) + for i := 0; i < len(v); i++ { + b := v[i] + if b <= 31 { + // prevent response splitting. + b = ' ' + } + p = append(p, b) + } + p = append(p, "\r\n"...) + } + } + p = append(p, "\r\n"...) + + // Clear deadlines set by HTTP server. + netConn.SetDeadline(time.Time{}) + + if u.HandshakeTimeout > 0 { + netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout)) + } + if _, err = netConn.Write(p); err != nil { + netConn.Close() + return nil, err + } + if u.HandshakeTimeout > 0 { + netConn.SetWriteDeadline(time.Time{}) + } + + return c, nil +} + +// Upgrade upgrades the HTTP server connection to the WebSocket protocol. +// +// Deprecated: Use websocket.Upgrader instead. +// +// Upgrade does not perform origin checking. The application is responsible for +// checking the Origin header before calling Upgrade. An example implementation +// of the same origin policy check is: +// +// if req.Header.Get("Origin") != "http://"+req.Host { +// http.Error(w, "Origin not allowed", http.StatusForbidden) +// return +// } +// +// If the endpoint supports subprotocols, then the application is responsible +// for negotiating the protocol used on the connection. Use the Subprotocols() +// function to get the subprotocols requested by the client. Use the +// Sec-Websocket-Protocol response header to specify the subprotocol selected +// by the application. +// +// The responseHeader is included in the response to the client's upgrade +// request. Use the responseHeader to specify cookies (Set-Cookie) and the +// negotiated subprotocol (Sec-Websocket-Protocol). +// +// The connection buffers IO to the underlying network connection. The +// readBufSize and writeBufSize parameters specify the size of the buffers to +// use. Messages can be larger than the buffers. +// +// If the request is not a valid WebSocket handshake, then Upgrade returns an +// error of type HandshakeError. Applications should handle this error by +// replying to the client with an HTTP error response. 
+func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) { + u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize} + u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) { + // don't return errors to maintain backwards compatibility + } + u.CheckOrigin = func(r *http.Request) bool { + // allow all connections by default + return true + } + return u.Upgrade(w, r, responseHeader) +} + +// Subprotocols returns the subprotocols requested by the client in the +// Sec-Websocket-Protocol header. +func Subprotocols(r *http.Request) []string { + h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol")) + if h == "" { + return nil + } + protocols := strings.Split(h, ",") + for i := range protocols { + protocols[i] = strings.TrimSpace(protocols[i]) + } + return protocols +} + +// IsWebSocketUpgrade returns true if the client requested upgrade to the +// WebSocket protocol. +func IsWebSocketUpgrade(r *http.Request) bool { + return tokenListContainsValue(r.Header, "Connection", "upgrade") && + tokenListContainsValue(r.Header, "Upgrade", "websocket") +} + +// bufioReaderSize size returns the size of a bufio.Reader. +func bufioReaderSize(originalReader io.Reader, br *bufio.Reader) int { + // This code assumes that peek on a reset reader returns + // bufio.Reader.buf[:0]. + // TODO: Use bufio.Reader.Size() after Go 1.10 + br.Reset(originalReader) + if p, err := br.Peek(0); err == nil { + return cap(p) + } + return 0 +} + +// writeHook is an io.Writer that records the last slice passed to it vio +// io.Writer.Write. +type writeHook struct { + p []byte +} + +func (wh *writeHook) Write(p []byte) (int, error) { + wh.p = p + return len(p), nil +} + +// bufioWriterBuffer grabs the buffer from a bufio.Writer. +func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte { + // This code assumes that bufio.Writer.buf[:1] is passed to the + // bufio.Writer's underlying writer. 
+ var wh writeHook + bw.Reset(&wh) + bw.WriteByte(0) + bw.Flush() + + bw.Reset(originalWriter) + + return wh.p[:cap(wh.p)] +} diff --git a/vendor/github.com/gorilla/websocket/tls_handshake.go b/vendor/github.com/gorilla/websocket/tls_handshake.go new file mode 100644 index 0000000000..a62b68ccb1 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/tls_handshake.go @@ -0,0 +1,21 @@ +//go:build go1.17 +// +build go1.17 + +package websocket + +import ( + "context" + "crypto/tls" +) + +func doHandshake(ctx context.Context, tlsConn *tls.Conn, cfg *tls.Config) error { + if err := tlsConn.HandshakeContext(ctx); err != nil { + return err + } + if !cfg.InsecureSkipVerify { + if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/gorilla/websocket/tls_handshake_116.go b/vendor/github.com/gorilla/websocket/tls_handshake_116.go new file mode 100644 index 0000000000..e1b2b44f6e --- /dev/null +++ b/vendor/github.com/gorilla/websocket/tls_handshake_116.go @@ -0,0 +1,21 @@ +//go:build !go1.17 +// +build !go1.17 + +package websocket + +import ( + "context" + "crypto/tls" +) + +func doHandshake(ctx context.Context, tlsConn *tls.Conn, cfg *tls.Config) error { + if err := tlsConn.Handshake(); err != nil { + return err + } + if !cfg.InsecureSkipVerify { + if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/gorilla/websocket/util.go b/vendor/github.com/gorilla/websocket/util.go new file mode 100644 index 0000000000..7bf2f66c67 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/util.go @@ -0,0 +1,283 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "crypto/rand" + "crypto/sha1" + "encoding/base64" + "io" + "net/http" + "strings" + "unicode/utf8" +) + +var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") + +func computeAcceptKey(challengeKey string) string { + h := sha1.New() + h.Write([]byte(challengeKey)) + h.Write(keyGUID) + return base64.StdEncoding.EncodeToString(h.Sum(nil)) +} + +func generateChallengeKey() (string, error) { + p := make([]byte, 16) + if _, err := io.ReadFull(rand.Reader, p); err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(p), nil +} + +// Token octets per RFC 2616. 
+var isTokenOctet = [256]bool{ + '!': true, + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '*': true, + '+': true, + '-': true, + '.': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'W': true, + 'V': true, + 'X': true, + 'Y': true, + 'Z': true, + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + '|': true, + '~': true, +} + +// skipSpace returns a slice of the string s with all leading RFC 2616 linear +// whitespace removed. +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if b := s[i]; b != ' ' && b != '\t' { + break + } + } + return s[i:] +} + +// nextToken returns the leading RFC 2616 token of s and the string following +// the token. +func nextToken(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + if !isTokenOctet[s[i]] { + break + } + } + return s[:i], s[i:] +} + +// nextTokenOrQuoted returns the leading token or quoted string per RFC 2616 +// and the string following the token or quoted string. +func nextTokenOrQuoted(s string) (value string, rest string) { + if !strings.HasPrefix(s, "\"") { + return nextToken(s) + } + s = s[1:] + for i := 0; i < len(s); i++ { + switch s[i] { + case '"': + return s[:i], s[i+1:] + case '\\': + p := make([]byte, len(s)-1) + j := copy(p, s[:i]) + escape := true + for i = i + 1; i < len(s); i++ { + b := s[i] + switch { + case escape: + escape = false + p[j] = b + j++ + case b == '\\': + escape = true + case b == '"': + return string(p[:j]), s[i+1:] + default: + p[j] = b + j++ + } + } + return "", "" + } + } + return "", "" +} + +// equalASCIIFold returns true if s is equal to t with ASCII case folding as +// defined in RFC 4790. +func equalASCIIFold(s, t string) bool { + for s != "" && t != "" { + sr, size := utf8.DecodeRuneInString(s) + s = s[size:] + tr, size := utf8.DecodeRuneInString(t) + t = t[size:] + if sr == tr { + continue + } + if 'A' <= sr && sr <= 'Z' { + sr = sr + 'a' - 'A' + } + if 'A' <= tr && tr <= 'Z' { + tr = tr + 'a' - 'A' + } + if sr != tr { + return false + } + } + return s == t +} + +// tokenListContainsValue returns true if the 1#token header with the given +// name contains a token equal to value with ASCII case folding. +func tokenListContainsValue(header http.Header, name string, value string) bool { +headers: + for _, s := range header[name] { + for { + var t string + t, s = nextToken(skipSpace(s)) + if t == "" { + continue headers + } + s = skipSpace(s) + if s != "" && s[0] != ',' { + continue headers + } + if equalASCIIFold(t, value) { + return true + } + if s == "" { + continue headers + } + s = s[1:] + } + } + return false +} + +// parseExtensions parses WebSocket extensions from a header. 
+func parseExtensions(header http.Header) []map[string]string { + // From RFC 6455: + // + // Sec-WebSocket-Extensions = extension-list + // extension-list = 1#extension + // extension = extension-token *( ";" extension-param ) + // extension-token = registered-token + // registered-token = token + // extension-param = token [ "=" (token | quoted-string) ] + // ;When using the quoted-string syntax variant, the value + // ;after quoted-string unescaping MUST conform to the + // ;'token' ABNF. + + var result []map[string]string +headers: + for _, s := range header["Sec-Websocket-Extensions"] { + for { + var t string + t, s = nextToken(skipSpace(s)) + if t == "" { + continue headers + } + ext := map[string]string{"": t} + for { + s = skipSpace(s) + if !strings.HasPrefix(s, ";") { + break + } + var k string + k, s = nextToken(skipSpace(s[1:])) + if k == "" { + continue headers + } + s = skipSpace(s) + var v string + if strings.HasPrefix(s, "=") { + v, s = nextTokenOrQuoted(skipSpace(s[1:])) + s = skipSpace(s) + } + if s != "" && s[0] != ',' && s[0] != ';' { + continue headers + } + ext[k] = v + } + if s != "" && s[0] != ',' { + continue headers + } + result = append(result, ext) + if s == "" { + continue headers + } + s = s[1:] + } + } + return result +} diff --git a/vendor/github.com/gorilla/websocket/x_net_proxy.go b/vendor/github.com/gorilla/websocket/x_net_proxy.go new file mode 100644 index 0000000000..2e668f6b88 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/x_net_proxy.go @@ -0,0 +1,473 @@ +// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT. +//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy + +// Package proxy provides support for a variety of protocols to proxy network +// data. +// + +package websocket + +import ( + "errors" + "io" + "net" + "net/url" + "os" + "strconv" + "strings" + "sync" +) + +type proxy_direct struct{} + +// Direct is a direct proxy: one that makes network connections directly. +var proxy_Direct = proxy_direct{} + +func (proxy_direct) Dial(network, addr string) (net.Conn, error) { + return net.Dial(network, addr) +} + +// A PerHost directs connections to a default Dialer unless the host name +// requested matches one of a number of exceptions. +type proxy_PerHost struct { + def, bypass proxy_Dialer + + bypassNetworks []*net.IPNet + bypassIPs []net.IP + bypassZones []string + bypassHosts []string +} + +// NewPerHost returns a PerHost Dialer that directs connections to either +// defaultDialer or bypass, depending on whether the connection matches one of +// the configured rules. +func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost { + return &proxy_PerHost{ + def: defaultDialer, + bypass: bypass, + } +} + +// Dial connects to the address addr on the given network through either +// defaultDialer or bypass. 
+func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + + return p.dialerForRequest(host).Dial(network, addr) +} + +func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer { + if ip := net.ParseIP(host); ip != nil { + for _, net := range p.bypassNetworks { + if net.Contains(ip) { + return p.bypass + } + } + for _, bypassIP := range p.bypassIPs { + if bypassIP.Equal(ip) { + return p.bypass + } + } + return p.def + } + + for _, zone := range p.bypassZones { + if strings.HasSuffix(host, zone) { + return p.bypass + } + if host == zone[1:] { + // For a zone ".example.com", we match "example.com" + // too. + return p.bypass + } + } + for _, bypassHost := range p.bypassHosts { + if bypassHost == host { + return p.bypass + } + } + return p.def +} + +// AddFromString parses a string that contains comma-separated values +// specifying hosts that should use the bypass proxy. Each value is either an +// IP address, a CIDR range, a zone (*.example.com) or a host name +// (localhost). A best effort is made to parse the string and errors are +// ignored. +func (p *proxy_PerHost) AddFromString(s string) { + hosts := strings.Split(s, ",") + for _, host := range hosts { + host = strings.TrimSpace(host) + if len(host) == 0 { + continue + } + if strings.Contains(host, "/") { + // We assume that it's a CIDR address like 127.0.0.0/8 + if _, net, err := net.ParseCIDR(host); err == nil { + p.AddNetwork(net) + } + continue + } + if ip := net.ParseIP(host); ip != nil { + p.AddIP(ip) + continue + } + if strings.HasPrefix(host, "*.") { + p.AddZone(host[1:]) + continue + } + p.AddHost(host) + } +} + +// AddIP specifies an IP address that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match an IP. +func (p *proxy_PerHost) AddIP(ip net.IP) { + p.bypassIPs = append(p.bypassIPs, ip) +} + +// AddNetwork specifies an IP range that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match. +func (p *proxy_PerHost) AddNetwork(net *net.IPNet) { + p.bypassNetworks = append(p.bypassNetworks, net) +} + +// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of +// "example.com" matches "example.com" and all of its subdomains. +func (p *proxy_PerHost) AddZone(zone string) { + if strings.HasSuffix(zone, ".") { + zone = zone[:len(zone)-1] + } + if !strings.HasPrefix(zone, ".") { + zone = "." + zone + } + p.bypassZones = append(p.bypassZones, zone) +} + +// AddHost specifies a host name that will use the bypass proxy. +func (p *proxy_PerHost) AddHost(host string) { + if strings.HasSuffix(host, ".") { + host = host[:len(host)-1] + } + p.bypassHosts = append(p.bypassHosts, host) +} + +// A Dialer is a means to establish a connection. +type proxy_Dialer interface { + // Dial connects to the given address via the proxy. + Dial(network, addr string) (c net.Conn, err error) +} + +// Auth contains authentication parameters that specific Dialers may require. +type proxy_Auth struct { + User, Password string +} + +// FromEnvironment returns the dialer specified by the proxy related variables in +// the environment. 
+func proxy_FromEnvironment() proxy_Dialer { + allProxy := proxy_allProxyEnv.Get() + if len(allProxy) == 0 { + return proxy_Direct + } + + proxyURL, err := url.Parse(allProxy) + if err != nil { + return proxy_Direct + } + proxy, err := proxy_FromURL(proxyURL, proxy_Direct) + if err != nil { + return proxy_Direct + } + + noProxy := proxy_noProxyEnv.Get() + if len(noProxy) == 0 { + return proxy + } + + perHost := proxy_NewPerHost(proxy, proxy_Direct) + perHost.AddFromString(noProxy) + return perHost +} + +// proxySchemes is a map from URL schemes to a function that creates a Dialer +// from a URL with such a scheme. +var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error) + +// RegisterDialerType takes a URL scheme and a function to generate Dialers from +// a URL with that scheme and a forwarding Dialer. Registered schemes are used +// by FromURL. +func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) { + if proxy_proxySchemes == nil { + proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) + } + proxy_proxySchemes[scheme] = f +} + +// FromURL returns a Dialer given a URL specification and an underlying +// Dialer for it to make network requests. +func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) { + var auth *proxy_Auth + if u.User != nil { + auth = new(proxy_Auth) + auth.User = u.User.Username() + if p, ok := u.User.Password(); ok { + auth.Password = p + } + } + + switch u.Scheme { + case "socks5": + return proxy_SOCKS5("tcp", u.Host, auth, forward) + } + + // If the scheme doesn't match any of the built-in schemes, see if it + // was registered by another package. + if proxy_proxySchemes != nil { + if f, ok := proxy_proxySchemes[u.Scheme]; ok { + return f(u, forward) + } + } + + return nil, errors.New("proxy: unknown scheme: " + u.Scheme) +} + +var ( + proxy_allProxyEnv = &proxy_envOnce{ + names: []string{"ALL_PROXY", "all_proxy"}, + } + proxy_noProxyEnv = &proxy_envOnce{ + names: []string{"NO_PROXY", "no_proxy"}, + } +) + +// envOnce looks up an environment variable (optionally by multiple +// names) once. It mitigates expensive lookups on some platforms +// (e.g. Windows). +// (Borrowed from net/http/transport.go) +type proxy_envOnce struct { + names []string + once sync.Once + val string +} + +func (e *proxy_envOnce) Get() string { + e.once.Do(e.init) + return e.val +} + +func (e *proxy_envOnce) init() { + for _, n := range e.names { + e.val = os.Getenv(n) + if e.val != "" { + return + } + } +} + +// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address +// with an optional username and password. See RFC 1928 and RFC 1929. 
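
The file above is a bundled, unexported copy of golang.org/x/net/proxy (as its generated header states), so callers cannot use proxy_FromEnvironment directly. A sketch of the equivalent behavior through the public x/net/proxy package follows; the SOCKS5 address and NO_PROXY entries are illustrative, and the dial will only succeed if such a proxy is actually running.

```go
package main

import (
	"log"
	"os"

	"golang.org/x/net/proxy" // public source of the bundled code above
)

func main() {
	// FromEnvironment reads ALL_PROXY / NO_PROXY (upper- or lower-case).
	// With NO_PROXY set, a PerHost dialer sends matching hosts (IPs, CIDR
	// ranges, ".zone" suffixes, plain host names) directly and routes
	// everything else through the configured SOCKS5 proxy.
	os.Setenv("ALL_PROXY", "socks5://127.0.0.1:1080") // illustrative proxy
	os.Setenv("NO_PROXY", "localhost,10.0.0.0/8,.internal.example")

	d := proxy.FromEnvironment()
	conn, err := d.Dial("tcp", "example.com:443")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	log.Println("connected via", conn.RemoteAddr())
}
```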
+func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) { + s := &proxy_socks5{ + network: network, + addr: addr, + forward: forward, + } + if auth != nil { + s.user = auth.User + s.password = auth.Password + } + + return s, nil +} + +type proxy_socks5 struct { + user, password string + network, addr string + forward proxy_Dialer +} + +const proxy_socks5Version = 5 + +const ( + proxy_socks5AuthNone = 0 + proxy_socks5AuthPassword = 2 +) + +const proxy_socks5Connect = 1 + +const ( + proxy_socks5IP4 = 1 + proxy_socks5Domain = 3 + proxy_socks5IP6 = 4 +) + +var proxy_socks5Errors = []string{ + "", + "general failure", + "connection forbidden", + "network unreachable", + "host unreachable", + "connection refused", + "TTL expired", + "command not supported", + "address type not supported", +} + +// Dial connects to the address addr on the given network via the SOCKS5 proxy. +func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) { + switch network { + case "tcp", "tcp6", "tcp4": + default: + return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network) + } + + conn, err := s.forward.Dial(s.network, s.addr) + if err != nil { + return nil, err + } + if err := s.connect(conn, addr); err != nil { + conn.Close() + return nil, err + } + return conn, nil +} + +// connect takes an existing connection to a socks5 proxy server, +// and commands the server to extend that connection to target, +// which must be a canonical address with a host and port. +func (s *proxy_socks5) connect(conn net.Conn, target string) error { + host, portStr, err := net.SplitHostPort(target) + if err != nil { + return err + } + + port, err := strconv.Atoi(portStr) + if err != nil { + return errors.New("proxy: failed to parse port number: " + portStr) + } + if port < 1 || port > 0xffff { + return errors.New("proxy: port number out of range: " + portStr) + } + + // the size here is just an estimate + buf := make([]byte, 0, 6+len(host)) + + buf = append(buf, proxy_socks5Version) + if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 { + buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword) + } else { + buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone) + } + + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + if buf[0] != 5 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0]))) + } + if buf[1] == 0xff { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication") + } + + // See RFC 1929 + if buf[1] == proxy_socks5AuthPassword { + buf = buf[:0] + buf = append(buf, 1 /* password protocol version */) + buf = append(buf, uint8(len(s.user))) + buf = append(buf, s.user...) + buf = append(buf, uint8(len(s.password))) + buf = append(buf, s.password...) 
+ + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if buf[1] != 0 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password") + } + } + + buf = buf[:0] + buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */) + + if ip := net.ParseIP(host); ip != nil { + if ip4 := ip.To4(); ip4 != nil { + buf = append(buf, proxy_socks5IP4) + ip = ip4 + } else { + buf = append(buf, proxy_socks5IP6) + } + buf = append(buf, ip...) + } else { + if len(host) > 255 { + return errors.New("proxy: destination host name too long: " + host) + } + buf = append(buf, proxy_socks5Domain) + buf = append(buf, byte(len(host))) + buf = append(buf, host...) + } + buf = append(buf, byte(port>>8), byte(port)) + + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:4]); err != nil { + return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + failure := "unknown error" + if int(buf[1]) < len(proxy_socks5Errors) { + failure = proxy_socks5Errors[buf[1]] + } + + if len(failure) > 0 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure) + } + + bytesToDiscard := 0 + switch buf[3] { + case proxy_socks5IP4: + bytesToDiscard = net.IPv4len + case proxy_socks5IP6: + bytesToDiscard = net.IPv6len + case proxy_socks5Domain: + _, err := io.ReadFull(conn, buf[:1]) + if err != nil { + return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + bytesToDiscard = int(buf[0]) + default: + return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr) + } + + if cap(buf) < bytesToDiscard { + buf = make([]byte, bytesToDiscard) + } else { + buf = buf[:bytesToDiscard] + } + if _, err := io.ReadFull(conn, buf); err != nil { + return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + // Also need to discard the port number + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + return nil +} diff --git a/vendor/github.com/imdario/mergo/CONTRIBUTING.md b/vendor/github.com/imdario/mergo/CONTRIBUTING.md new file mode 100644 index 0000000000..0a1ff9f94d --- /dev/null +++ b/vendor/github.com/imdario/mergo/CONTRIBUTING.md @@ -0,0 +1,112 @@ + +# Contributing to mergo + +First off, thanks for taking the time to contribute! ❤️ + +All types of contributions are encouraged and valued. See the [Table of Contents](#table-of-contents) for different ways to help and details about how this project handles them. Please make sure to read the relevant section before making your contribution. It will make it a lot easier for us maintainers and smooth out the experience for all involved. The community looks forward to your contributions. 🎉 + +> And if you like the project, but just don't have time to contribute, that's fine. 
There are other easy ways to support the project and show your appreciation, which we would also be very happy about: +> - Star the project +> - Tweet about it +> - Refer this project in your project's readme +> - Mention the project at local meetups and tell your friends/colleagues + + +## Table of Contents + +- [Code of Conduct](#code-of-conduct) +- [I Have a Question](#i-have-a-question) +- [I Want To Contribute](#i-want-to-contribute) +- [Reporting Bugs](#reporting-bugs) +- [Suggesting Enhancements](#suggesting-enhancements) + +## Code of Conduct + +This project and everyone participating in it is governed by the +[mergo Code of Conduct](https://github.com/imdario/mergoblob/master/CODE_OF_CONDUCT.md). +By participating, you are expected to uphold this code. Please report unacceptable behavior +to <>. + + +## I Have a Question + +> If you want to ask a question, we assume that you have read the available [Documentation](https://pkg.go.dev/github.com/imdario/mergo). + +Before you ask a question, it is best to search for existing [Issues](https://github.com/imdario/mergo/issues) that might help you. In case you have found a suitable issue and still need clarification, you can write your question in this issue. It is also advisable to search the internet for answers first. + +If you then still feel the need to ask a question and need clarification, we recommend the following: + +- Open an [Issue](https://github.com/imdario/mergo/issues/new). +- Provide as much context as you can about what you're running into. +- Provide project and platform versions (nodejs, npm, etc), depending on what seems relevant. + +We will then take care of the issue as soon as possible. + +## I Want To Contribute + +> ### Legal Notice +> When contributing to this project, you must agree that you have authored 100% of the content, that you have the necessary rights to the content and that the content you contribute may be provided under the project license. + +### Reporting Bugs + + +#### Before Submitting a Bug Report + +A good bug report shouldn't leave others needing to chase you up for more information. Therefore, we ask you to investigate carefully, collect information and describe the issue in detail in your report. Please complete the following steps in advance to help us fix any potential bug as fast as possible. + +- Make sure that you are using the latest version. +- Determine if your bug is really a bug and not an error on your side e.g. using incompatible environment components/versions (Make sure that you have read the [documentation](). If you are looking for support, you might want to check [this section](#i-have-a-question)). +- To see if other users have experienced (and potentially already solved) the same issue you are having, check if there is not already a bug report existing for your bug or error in the [bug tracker](https://github.com/imdario/mergoissues?q=label%3Abug). +- Also make sure to search the internet (including Stack Overflow) to see if users outside of the GitHub community have discussed the issue. +- Collect information about the bug: +- Stack trace (Traceback) +- OS, Platform and Version (Windows, Linux, macOS, x86, ARM) +- Version of the interpreter, compiler, SDK, runtime environment, package manager, depending on what seems relevant. +- Possibly your input and the output +- Can you reliably reproduce the issue? And can you also reproduce it with older versions? + + +#### How Do I Submit a Good Bug Report? 
+ +> You must never report security related issues, vulnerabilities or bugs including sensitive information to the issue tracker, or elsewhere in public. Instead sensitive bugs must be sent by email to . + + +We use GitHub issues to track bugs and errors. If you run into an issue with the project: + +- Open an [Issue](https://github.com/imdario/mergo/issues/new). (Since we can't be sure at this point whether it is a bug or not, we ask you not to talk about a bug yet and not to label the issue.) +- Explain the behavior you would expect and the actual behavior. +- Please provide as much context as possible and describe the *reproduction steps* that someone else can follow to recreate the issue on their own. This usually includes your code. For good bug reports you should isolate the problem and create a reduced test case. +- Provide the information you collected in the previous section. + +Once it's filed: + +- The project team will label the issue accordingly. +- A team member will try to reproduce the issue with your provided steps. If there are no reproduction steps or no obvious way to reproduce the issue, the team will ask you for those steps and mark the issue as `needs-repro`. Bugs with the `needs-repro` tag will not be addressed until they are reproduced. +- If the team is able to reproduce the issue, it will be marked `needs-fix`, as well as possibly other tags (such as `critical`), and the issue will be left to be implemented by someone. + +### Suggesting Enhancements + +This section guides you through submitting an enhancement suggestion for mergo, **including completely new features and minor improvements to existing functionality**. Following these guidelines will help maintainers and the community to understand your suggestion and find related suggestions. + + +#### Before Submitting an Enhancement + +- Make sure that you are using the latest version. +- Read the [documentation]() carefully and find out if the functionality is already covered, maybe by an individual configuration. +- Perform a [search](https://github.com/imdario/mergo/issues) to see if the enhancement has already been suggested. If it has, add a comment to the existing issue instead of opening a new one. +- Find out whether your idea fits with the scope and aims of the project. It's up to you to make a strong case to convince the project's developers of the merits of this feature. Keep in mind that we want features that will be useful to the majority of our users and not just a small subset. If you're just targeting a minority of users, consider writing an add-on/plugin library. + + +#### How Do I Submit a Good Enhancement Suggestion? + +Enhancement suggestions are tracked as [GitHub issues](https://github.com/imdario/mergo/issues). + +- Use a **clear and descriptive title** for the issue to identify the suggestion. +- Provide a **step-by-step description of the suggested enhancement** in as many details as possible. +- **Describe the current behavior** and **explain which behavior you expected to see instead** and why. At this point you can also tell which alternatives do not work for you. +- You may want to **include screenshots and animated GIFs** which help you demonstrate the steps or point out the part which the suggestion is related to. You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux. 
+- **Explain why this enhancement would be useful** to most mergo users. You may also want to point out the other projects that solved it better and which could serve as inspiration. + + +## Attribution +This guide is based on the **contributing-gen**. [Make your own](https://github.com/bttger/contributing-gen)! diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md index 7e6f7aeee8..ffbbb62c70 100644 --- a/vendor/github.com/imdario/mergo/README.md +++ b/vendor/github.com/imdario/mergo/README.md @@ -1,17 +1,20 @@ # Mergo - -[![GoDoc][3]][4] [![GitHub release][5]][6] [![GoCard][7]][8] -[![Build Status][1]][2] -[![Coverage Status][9]][10] +[![Test status][1]][2] +[![OpenSSF Scorecard][21]][22] +[![OpenSSF Best Practices][19]][20] +[![Coverage status][9]][10] [![Sourcegraph][11]][12] -[![FOSSA Status][13]][14] +[![FOSSA status][13]][14] + +[![GoDoc][3]][4] [![Become my sponsor][15]][16] +[![Tidelift][17]][18] -[1]: https://travis-ci.org/imdario/mergo.png -[2]: https://travis-ci.org/imdario/mergo +[1]: https://github.com/imdario/mergo/workflows/tests/badge.svg?branch=master +[2]: https://github.com/imdario/mergo/actions/workflows/tests.yml [3]: https://godoc.org/github.com/imdario/mergo?status.svg [4]: https://godoc.org/github.com/imdario/mergo [5]: https://img.shields.io/github/release/imdario/mergo.svg @@ -26,6 +29,12 @@ [14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield [15]: https://img.shields.io/github/sponsors/imdario [16]: https://github.com/sponsors/imdario +[17]: https://tidelift.com/badges/package/go/github.com%2Fimdario%2Fmergo +[18]: https://tidelift.com/subscription/pkg/go-github.com-imdario-mergo +[19]: https://bestpractices.coreinfrastructure.org/projects/7177/badge +[20]: https://bestpractices.coreinfrastructure.org/projects/7177 +[21]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo/badge +[22]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. @@ -55,7 +64,6 @@ If Mergo is useful to you, consider buying me a coffee, a beer, or making a mont ### Mergo in the wild -- [cli/cli](https://github.com/cli/cli) - [moby/moby](https://github.com/moby/moby) - [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) - [vmware/dispatch](https://github.com/vmware/dispatch) @@ -231,5 +239,4 @@ Written by [Dario Castañé](http://dario.im). [BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). - [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large) diff --git a/vendor/github.com/imdario/mergo/SECURITY.md b/vendor/github.com/imdario/mergo/SECURITY.md new file mode 100644 index 0000000000..a5de61f77b --- /dev/null +++ b/vendor/github.com/imdario/mergo/SECURITY.md @@ -0,0 +1,14 @@ +# Security Policy + +## Supported Versions + +| Version | Supported | +| ------- | ------------------ | +| 0.3.x | :white_check_mark: | +| < 0.3 | :x: | + +## Security contact information + +To report a security vulnerability, please use the +[Tidelift security contact](https://tidelift.com/security). +Tidelift will coordinate the fix and disclosure. 
diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go index a13a7ee46c..b50d5c2a4e 100644 --- a/vendor/github.com/imdario/mergo/map.go +++ b/vendor/github.com/imdario/mergo/map.go @@ -44,7 +44,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf } } // Remember, remember... - visited[h] = &visit{addr, typ, seen} + visited[h] = &visit{typ, seen, addr} } zeroValue := reflect.Value{} switch dst.Kind() { @@ -58,7 +58,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf } fieldName := field.Name fieldName = changeInitialCase(fieldName, unicode.ToLower) - if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) { + if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v), !config.ShouldNotDereference) || overwrite) { dstMap[fieldName] = src.Field(i).Interface() } } @@ -142,7 +142,7 @@ func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { func _map(dst, src interface{}, opts ...func(*Config)) error { if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { - return ErrNonPointerAgument + return ErrNonPointerArgument } var ( vDst, vSrc reflect.Value diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go index 8b4e2f47a0..0ef9b2138c 100644 --- a/vendor/github.com/imdario/mergo/merge.go +++ b/vendor/github.com/imdario/mergo/merge.go @@ -38,10 +38,11 @@ func isExportedComponent(field *reflect.StructField) bool { } type Config struct { + Transformers Transformers Overwrite bool + ShouldNotDereference bool AppendSlice bool TypeCheck bool - Transformers Transformers overwriteWithEmptyValue bool overwriteSliceWithEmptyValue bool sliceDeepCopy bool @@ -76,7 +77,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } } // Remember, remember... 
- visited[h] = &visit{addr, typ, seen} + visited[h] = &visit{typ, seen, addr} } if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() { @@ -95,7 +96,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } } } else { - if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) { + if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) { dst.Set(src) } } @@ -110,7 +111,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } if src.Kind() != reflect.Map { - if overwrite { + if overwrite && dst.CanSet() { dst.Set(src) } return @@ -162,7 +163,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co dstSlice = reflect.ValueOf(dstElement.Interface()) } - if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { + if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy { if typeCheck && srcSlice.Type() != dstSlice.Type() { return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) } @@ -194,22 +195,38 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co dst.SetMapIndex(key, dstSlice) } } - if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) { - continue + + if dstElement.IsValid() && !isEmptyValue(dstElement, !config.ShouldNotDereference) { + if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice { + continue + } + if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map && reflect.TypeOf(dstElement.Interface()).Kind() == reflect.Map { + continue + } } - if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement)) { + if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement, !config.ShouldNotDereference)) { if dst.IsNil() { dst.Set(reflect.MakeMap(dst.Type())) } dst.SetMapIndex(key, srcElement) } } + + // Ensure that all keys in dst are deleted if they are not in src. 
+ if overwriteWithEmptySrc { + for _, key := range dst.MapKeys() { + srcElement := src.MapIndex(key) + if !srcElement.IsValid() { + dst.SetMapIndex(key, reflect.Value{}) + } + } + } case reflect.Slice: if !dst.CanSet() { break } - if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { + if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy { dst.Set(src) } else if config.AppendSlice { if src.Type() != dst.Type() { @@ -244,12 +261,18 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co if src.Kind() != reflect.Interface { if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) { - if dst.CanSet() && (overwrite || isEmptyValue(dst)) { + if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) { dst.Set(src) } } else if src.Kind() == reflect.Ptr { - if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { - return + if !config.ShouldNotDereference { + if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { + return + } + } else { + if overwriteWithEmptySrc || (overwrite && !src.IsNil()) || dst.IsNil() { + dst.Set(src) + } } } else if dst.Elem().Type() == src.Type() { if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil { @@ -262,7 +285,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } if dst.IsNil() || overwrite { - if dst.CanSet() && (overwrite || isEmptyValue(dst)) { + if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) { dst.Set(src) } break @@ -275,7 +298,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co break } default: - mustSet := (isEmptyValue(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) + mustSet := (isEmptyValue(dst, !config.ShouldNotDereference) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) if mustSet { if dst.CanSet() { dst.Set(src) @@ -326,6 +349,12 @@ func WithOverrideEmptySlice(config *Config) { config.overwriteSliceWithEmptyValue = true } +// WithoutDereference prevents dereferencing pointers when evaluating whether they are empty +// (i.e. a non-nil pointer is never considered empty). +func WithoutDereference(config *Config) { + config.ShouldNotDereference = true +} + // WithAppendSlice will make merge append slices instead of overwriting it. 
func WithAppendSlice(config *Config) { config.AppendSlice = true @@ -344,7 +373,7 @@ func WithSliceDeepCopy(config *Config) { func merge(dst, src interface{}, opts ...func(*Config)) error { if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { - return ErrNonPointerAgument + return ErrNonPointerArgument } var ( vDst, vSrc reflect.Value diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go index 9fe362d476..0a721e2d85 100644 --- a/vendor/github.com/imdario/mergo/mergo.go +++ b/vendor/github.com/imdario/mergo/mergo.go @@ -20,7 +20,7 @@ var ( ErrNotSupported = errors.New("only structs, maps, and slices are supported") ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") - ErrNonPointerAgument = errors.New("dst must be a pointer") + ErrNonPointerArgument = errors.New("dst must be a pointer") ) // During deepMerge, must keep track of checks that are @@ -28,13 +28,13 @@ var ( // checks in progress are true when it reencounters them. // Visited are stored in a map indexed by 17 * a1 + a2; type visit struct { - ptr uintptr typ reflect.Type next *visit + ptr uintptr } // From src/pkg/encoding/json/encode.go. -func isEmptyValue(v reflect.Value) bool { +func isEmptyValue(v reflect.Value, shouldDereference bool) bool { switch v.Kind() { case reflect.Array, reflect.Map, reflect.Slice, reflect.String: return v.Len() == 0 @@ -50,7 +50,10 @@ func isEmptyValue(v reflect.Value) bool { if v.IsNil() { return true } - return isEmptyValue(v.Elem()) + if shouldDereference { + return isEmptyValue(v.Elem(), shouldDereference) + } + return false case reflect.Func: return v.IsNil() case reflect.Invalid: diff --git a/vendor/github.com/mxk/go-flowrate/LICENSE b/vendor/github.com/mxk/go-flowrate/LICENSE new file mode 100644 index 0000000000..e9f9f628ba --- /dev/null +++ b/vendor/github.com/mxk/go-flowrate/LICENSE @@ -0,0 +1,29 @@ +Copyright (c) 2014 The Go-FlowRate Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the + distribution. + + * Neither the name of the go-flowrate project nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
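
The caller-visible change in the mergo bump above is the new WithoutDereference option (ShouldNotDereference in Config): per its doc comment, pointers are no longer dereferenced when deciding emptiness, so a non-nil pointer in dst is never treated as empty. A sketch of the difference, assuming illustrative Settings/Replicas names (not part of the patch):

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type Settings struct {
	Name     string
	Replicas *int
}

func main() {
	a, b, three := 0, 0, 3
	src := Settings{Name: "default", Replicas: &three}

	// Default behavior: the non-nil *int in dst is dereferenced, the pointee 0
	// counts as empty, and the value from src is merged into it.
	dst := Settings{Replicas: &a}
	_ = mergo.Merge(&dst, src)
	fmt.Println(dst.Name, *dst.Replicas) // default 3

	// WithoutDereference: a non-nil pointer is never considered empty, so the
	// explicit zero in dst2 survives the merge; only the empty Name is filled.
	dst2 := Settings{Replicas: &b}
	_ = mergo.Merge(&dst2, src, mergo.WithoutDereference)
	fmt.Println(dst2.Name, *dst2.Replicas) // default 0
}
```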
diff --git a/vendor/github.com/mxk/go-flowrate/flowrate/flowrate.go b/vendor/github.com/mxk/go-flowrate/flowrate/flowrate.go new file mode 100644 index 0000000000..1b727721e1 --- /dev/null +++ b/vendor/github.com/mxk/go-flowrate/flowrate/flowrate.go @@ -0,0 +1,267 @@ +// +// Written by Maxim Khitrov (November 2012) +// + +// Package flowrate provides the tools for monitoring and limiting the flow rate +// of an arbitrary data stream. +package flowrate + +import ( + "math" + "sync" + "time" +) + +// Monitor monitors and limits the transfer rate of a data stream. +type Monitor struct { + mu sync.Mutex // Mutex guarding access to all internal fields + active bool // Flag indicating an active transfer + start time.Duration // Transfer start time (clock() value) + bytes int64 // Total number of bytes transferred + samples int64 // Total number of samples taken + + rSample float64 // Most recent transfer rate sample (bytes per second) + rEMA float64 // Exponential moving average of rSample + rPeak float64 // Peak transfer rate (max of all rSamples) + rWindow float64 // rEMA window (seconds) + + sBytes int64 // Number of bytes transferred since sLast + sLast time.Duration // Most recent sample time (stop time when inactive) + sRate time.Duration // Sampling rate + + tBytes int64 // Number of bytes expected in the current transfer + tLast time.Duration // Time of the most recent transfer of at least 1 byte +} + +// New creates a new flow control monitor. Instantaneous transfer rate is +// measured and updated for each sampleRate interval. windowSize determines the +// weight of each sample in the exponential moving average (EMA) calculation. +// The exact formulas are: +// +// sampleTime = currentTime - prevSampleTime +// sampleRate = byteCount / sampleTime +// weight = 1 - exp(-sampleTime/windowSize) +// newRate = weight*sampleRate + (1-weight)*oldRate +// +// The default values for sampleRate and windowSize (if <= 0) are 100ms and 1s, +// respectively. +func New(sampleRate, windowSize time.Duration) *Monitor { + if sampleRate = clockRound(sampleRate); sampleRate <= 0 { + sampleRate = 5 * clockRate + } + if windowSize <= 0 { + windowSize = 1 * time.Second + } + now := clock() + return &Monitor{ + active: true, + start: now, + rWindow: windowSize.Seconds(), + sLast: now, + sRate: sampleRate, + tLast: now, + } +} + +// Update records the transfer of n bytes and returns n. It should be called +// after each Read/Write operation, even if n is 0. +func (m *Monitor) Update(n int) int { + m.mu.Lock() + m.update(n) + m.mu.Unlock() + return n +} + +// IO is a convenience method intended to wrap io.Reader and io.Writer method +// execution. It calls m.Update(n) and then returns (n, err) unmodified. +func (m *Monitor) IO(n int, err error) (int, error) { + return m.Update(n), err +} + +// Done marks the transfer as finished and prevents any further updates or +// limiting. Instantaneous and current transfer rates drop to 0. Update, IO, and +// Limit methods become NOOPs. It returns the total number of bytes transferred. +func (m *Monitor) Done() int64 { + m.mu.Lock() + if now := m.update(0); m.sBytes > 0 { + m.reset(now) + } + m.active = false + m.tLast = 0 + n := m.bytes + m.mu.Unlock() + return n +} + +// timeRemLimit is the maximum Status.TimeRem value. +const timeRemLimit = 999*time.Hour + 59*time.Minute + 59*time.Second + +// Status represents the current Monitor status. All transfer rates are in bytes +// per second rounded to the nearest byte. 
+type Status struct { + Active bool // Flag indicating an active transfer + Start time.Time // Transfer start time + Duration time.Duration // Time period covered by the statistics + Idle time.Duration // Time since the last transfer of at least 1 byte + Bytes int64 // Total number of bytes transferred + Samples int64 // Total number of samples taken + InstRate int64 // Instantaneous transfer rate + CurRate int64 // Current transfer rate (EMA of InstRate) + AvgRate int64 // Average transfer rate (Bytes / Duration) + PeakRate int64 // Maximum instantaneous transfer rate + BytesRem int64 // Number of bytes remaining in the transfer + TimeRem time.Duration // Estimated time to completion + Progress Percent // Overall transfer progress +} + +// Status returns current transfer status information. The returned value +// becomes static after a call to Done. +func (m *Monitor) Status() Status { + m.mu.Lock() + now := m.update(0) + s := Status{ + Active: m.active, + Start: clockToTime(m.start), + Duration: m.sLast - m.start, + Idle: now - m.tLast, + Bytes: m.bytes, + Samples: m.samples, + PeakRate: round(m.rPeak), + BytesRem: m.tBytes - m.bytes, + Progress: percentOf(float64(m.bytes), float64(m.tBytes)), + } + if s.BytesRem < 0 { + s.BytesRem = 0 + } + if s.Duration > 0 { + rAvg := float64(s.Bytes) / s.Duration.Seconds() + s.AvgRate = round(rAvg) + if s.Active { + s.InstRate = round(m.rSample) + s.CurRate = round(m.rEMA) + if s.BytesRem > 0 { + if tRate := 0.8*m.rEMA + 0.2*rAvg; tRate > 0 { + ns := float64(s.BytesRem) / tRate * 1e9 + if ns > float64(timeRemLimit) { + ns = float64(timeRemLimit) + } + s.TimeRem = clockRound(time.Duration(ns)) + } + } + } + } + m.mu.Unlock() + return s +} + +// Limit restricts the instantaneous (per-sample) data flow to rate bytes per +// second. It returns the maximum number of bytes (0 <= n <= want) that may be +// transferred immediately without exceeding the limit. If block == true, the +// call blocks until n > 0. want is returned unmodified if want < 1, rate < 1, +// or the transfer is inactive (after a call to Done). +// +// At least one byte is always allowed to be transferred in any given sampling +// period. Thus, if the sampling rate is 100ms, the lowest achievable flow rate +// is 10 bytes per second. +// +// For usage examples, see the implementation of Reader and Writer in io.go. +func (m *Monitor) Limit(want int, rate int64, block bool) (n int) { + if want < 1 || rate < 1 { + return want + } + m.mu.Lock() + + // Determine the maximum number of bytes that can be sent in one sample + limit := round(float64(rate) * m.sRate.Seconds()) + if limit <= 0 { + limit = 1 + } + + // If block == true, wait until m.sBytes < limit + if now := m.update(0); block { + for m.sBytes >= limit && m.active { + now = m.waitNextSample(now) + } + } + + // Make limit <= want (unlimited if the transfer is no longer active) + if limit -= m.sBytes; limit > int64(want) || !m.active { + limit = int64(want) + } + m.mu.Unlock() + + if limit < 0 { + limit = 0 + } + return int(limit) +} + +// SetTransferSize specifies the total size of the data transfer, which allows +// the Monitor to calculate the overall progress and time to completion. +func (m *Monitor) SetTransferSize(bytes int64) { + if bytes < 0 { + bytes = 0 + } + m.mu.Lock() + m.tBytes = bytes + m.mu.Unlock() +} + +// update accumulates the transferred byte count for the current sample until +// clock() - m.sLast >= m.sRate. The monitor status is updated once the current +// sample is done. 
+func (m *Monitor) update(n int) (now time.Duration) { + if !m.active { + return + } + if now = clock(); n > 0 { + m.tLast = now + } + m.sBytes += int64(n) + if sTime := now - m.sLast; sTime >= m.sRate { + t := sTime.Seconds() + if m.rSample = float64(m.sBytes) / t; m.rSample > m.rPeak { + m.rPeak = m.rSample + } + + // Exponential moving average using a method similar to *nix load + // average calculation. Longer sampling periods carry greater weight. + if m.samples > 0 { + w := math.Exp(-t / m.rWindow) + m.rEMA = m.rSample + w*(m.rEMA-m.rSample) + } else { + m.rEMA = m.rSample + } + m.reset(now) + } + return +} + +// reset clears the current sample state in preparation for the next sample. +func (m *Monitor) reset(sampleTime time.Duration) { + m.bytes += m.sBytes + m.samples++ + m.sBytes = 0 + m.sLast = sampleTime +} + +// waitNextSample sleeps for the remainder of the current sample. The lock is +// released and reacquired during the actual sleep period, so it's possible for +// the transfer to be inactive when this method returns. +func (m *Monitor) waitNextSample(now time.Duration) time.Duration { + const minWait = 5 * time.Millisecond + current := m.sLast + + // sleep until the last sample time changes (ideally, just one iteration) + for m.sLast == current && m.active { + d := current + m.sRate - now + m.mu.Unlock() + if d < minWait { + d = minWait + } + time.Sleep(d) + m.mu.Lock() + now = m.update(0) + } + return now +} diff --git a/vendor/github.com/mxk/go-flowrate/flowrate/io.go b/vendor/github.com/mxk/go-flowrate/flowrate/io.go new file mode 100644 index 0000000000..fbe0909725 --- /dev/null +++ b/vendor/github.com/mxk/go-flowrate/flowrate/io.go @@ -0,0 +1,133 @@ +// +// Written by Maxim Khitrov (November 2012) +// + +package flowrate + +import ( + "errors" + "io" +) + +// ErrLimit is returned by the Writer when a non-blocking write is short due to +// the transfer rate limit. +var ErrLimit = errors.New("flowrate: flow rate limit exceeded") + +// Limiter is implemented by the Reader and Writer to provide a consistent +// interface for monitoring and controlling data transfer. +type Limiter interface { + Done() int64 + Status() Status + SetTransferSize(bytes int64) + SetLimit(new int64) (old int64) + SetBlocking(new bool) (old bool) +} + +// Reader implements io.ReadCloser with a restriction on the rate of data +// transfer. +type Reader struct { + io.Reader // Data source + *Monitor // Flow control monitor + + limit int64 // Rate limit in bytes per second (unlimited when <= 0) + block bool // What to do when no new bytes can be read due to the limit +} + +// NewReader restricts all Read operations on r to limit bytes per second. +func NewReader(r io.Reader, limit int64) *Reader { + return &Reader{r, New(0, 0), limit, true} +} + +// Read reads up to len(p) bytes into p without exceeding the current transfer +// rate limit. It returns (0, nil) immediately if r is non-blocking and no new +// bytes can be read at this time. +func (r *Reader) Read(p []byte) (n int, err error) { + p = p[:r.Limit(len(p), r.limit, r.block)] + if len(p) > 0 { + n, err = r.IO(r.Reader.Read(p)) + } + return +} + +// SetLimit changes the transfer rate limit to new bytes per second and returns +// the previous setting. +func (r *Reader) SetLimit(new int64) (old int64) { + old, r.limit = r.limit, new + return +} + +// SetBlocking changes the blocking behavior and returns the previous setting. 
A +// Read call on a non-blocking reader returns immediately if no additional bytes +// may be read at this time due to the rate limit. +func (r *Reader) SetBlocking(new bool) (old bool) { + old, r.block = r.block, new + return +} + +// Close closes the underlying reader if it implements the io.Closer interface. +func (r *Reader) Close() error { + defer r.Done() + if c, ok := r.Reader.(io.Closer); ok { + return c.Close() + } + return nil +} + +// Writer implements io.WriteCloser with a restriction on the rate of data +// transfer. +type Writer struct { + io.Writer // Data destination + *Monitor // Flow control monitor + + limit int64 // Rate limit in bytes per second (unlimited when <= 0) + block bool // What to do when no new bytes can be written due to the limit +} + +// NewWriter restricts all Write operations on w to limit bytes per second. The +// transfer rate and the default blocking behavior (true) can be changed +// directly on the returned *Writer. +func NewWriter(w io.Writer, limit int64) *Writer { + return &Writer{w, New(0, 0), limit, true} +} + +// Write writes len(p) bytes from p to the underlying data stream without +// exceeding the current transfer rate limit. It returns (n, ErrLimit) if w is +// non-blocking and no additional bytes can be written at this time. +func (w *Writer) Write(p []byte) (n int, err error) { + var c int + for len(p) > 0 && err == nil { + s := p[:w.Limit(len(p), w.limit, w.block)] + if len(s) > 0 { + c, err = w.IO(w.Writer.Write(s)) + } else { + return n, ErrLimit + } + p = p[c:] + n += c + } + return +} + +// SetLimit changes the transfer rate limit to new bytes per second and returns +// the previous setting. +func (w *Writer) SetLimit(new int64) (old int64) { + old, w.limit = w.limit, new + return +} + +// SetBlocking changes the blocking behavior and returns the previous setting. A +// Write call on a non-blocking writer returns as soon as no additional bytes +// may be written at this time due to the rate limit. +func (w *Writer) SetBlocking(new bool) (old bool) { + old, w.block = w.block, new + return +} + +// Close closes the underlying writer if it implements the io.Closer interface. +func (w *Writer) Close() error { + defer w.Done() + if c, ok := w.Writer.(io.Closer); ok { + return c.Close() + } + return nil +} diff --git a/vendor/github.com/mxk/go-flowrate/flowrate/util.go b/vendor/github.com/mxk/go-flowrate/flowrate/util.go new file mode 100644 index 0000000000..4caac583fc --- /dev/null +++ b/vendor/github.com/mxk/go-flowrate/flowrate/util.go @@ -0,0 +1,67 @@ +// +// Written by Maxim Khitrov (November 2012) +// + +package flowrate + +import ( + "math" + "strconv" + "time" +) + +// clockRate is the resolution and precision of clock(). +const clockRate = 20 * time.Millisecond + +// czero is the process start time rounded down to the nearest clockRate +// increment. +var czero = time.Duration(time.Now().UnixNano()) / clockRate * clockRate + +// clock returns a low resolution timestamp relative to the process start time. +func clock() time.Duration { + return time.Duration(time.Now().UnixNano())/clockRate*clockRate - czero +} + +// clockToTime converts a clock() timestamp to an absolute time.Time value. +func clockToTime(c time.Duration) time.Time { + return time.Unix(0, int64(czero+c)) +} + +// clockRound returns d rounded to the nearest clockRate increment. +func clockRound(d time.Duration) time.Duration { + return (d + clockRate>>1) / clockRate * clockRate +} + +// round returns x rounded to the nearest int64 (non-negative values only). 
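
A short usage sketch of the flowrate Reader vendored above (not part of the patch): wrap any io.Reader, cap its throughput, and read the resulting Status. The 1 KiB/s limit and 4 KiB payload are illustrative; with the default 100 ms sampling rate and 1 s EMA window from New, this copy takes roughly four seconds.

```go
package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/mxk/go-flowrate/flowrate"
)

func main() {
	src := strings.NewReader(strings.Repeat("x", 4096))

	// NewReader(r, limit) restricts Read to ~limit bytes per second and
	// embeds a *Monitor, so Status, Done and SetTransferSize are available.
	r := flowrate.NewReader(src, 1024)
	r.SetTransferSize(4096) // enables Progress and TimeRem in Status

	n, err := io.Copy(io.Discard, r)
	if err != nil {
		fmt.Println("copy:", err)
	}
	r.Done() // Status becomes static after Done

	s := r.Status()
	fmt.Printf("copied %d bytes, avg %d B/s, progress %s\n", n, s.AvgRate, s.Progress)
}
```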
+func round(x float64) int64 { + if _, frac := math.Modf(x); frac >= 0.5 { + return int64(math.Ceil(x)) + } + return int64(math.Floor(x)) +} + +// Percent represents a percentage in increments of 1/1000th of a percent. +type Percent uint32 + +// percentOf calculates what percent of the total is x. +func percentOf(x, total float64) Percent { + if x < 0 || total <= 0 { + return 0 + } else if p := round(x / total * 1e5); p <= math.MaxUint32 { + return Percent(p) + } + return Percent(math.MaxUint32) +} + +func (p Percent) Float() float64 { + return float64(p) * 1e-3 +} + +func (p Percent) String() string { + var buf [12]byte + b := strconv.AppendUint(buf[:0], uint64(p)/1000, 10) + n := len(b) + b = strconv.AppendUint(b, 1000+uint64(p)%1000, 10) + b[n] = '.' + return string(append(b, '%')) +} diff --git a/vendor/github.com/spf13/cast/README.md b/vendor/github.com/spf13/cast/README.md index 58141f02f1..0e9e145935 100644 --- a/vendor/github.com/spf13/cast/README.md +++ b/vendor/github.com/spf13/cast/README.md @@ -1,9 +1,9 @@ # cast -[![Build Status](https://github.com/spf13/cast/actions/workflows/ci.yml/badge.svg)](https://github.com/spf13/cast/actions/workflows/ci.yml) +[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/spf13/cast/ci.yaml?branch=master&style=flat-square)](https://github.com/spf13/cast/actions/workflows/ci.yaml) [![PkgGoDev](https://pkg.go.dev/badge/mod/github.com/spf13/cast)](https://pkg.go.dev/mod/github.com/spf13/cast) ![Go Version](https://img.shields.io/badge/go%20version-%3E=1.16-61CFDD.svg?style=flat-square) -[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cast)](https://goreportcard.com/report/github.com/spf13/cast) +[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cast?style=flat-square)](https://goreportcard.com/report/github.com/spf13/cast) Easy and safe casting from one type to another in Go diff --git a/vendor/github.com/vishvananda/netlink/class.go b/vendor/github.com/vishvananda/netlink/class.go index 10ceffed8b..e686f67450 100644 --- a/vendor/github.com/vishvananda/netlink/class.go +++ b/vendor/github.com/vishvananda/netlink/class.go @@ -47,6 +47,7 @@ type ClassStatistics struct { Basic *GnetStatsBasic Queue *GnetStatsQueue RateEst *GnetStatsRateEst + BasicHw *GnetStatsBasic // Hardward statistics added in kernel 4.20 } // NewClassStatistics Construct a ClassStatistics struct which fields are all initialized by 0. 
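A worked illustration of the Percent helpers in the flowrate util.go hunk above; the values are derived from the code shown, and since percentOf is unexported the fragment is written as if it ran inside package flowrate:

// Illustrative fragment, inside package flowrate:
p := percentOf(1, 3) // p == Percent(33333): stored in thousandths of a percent
_ = p.Float()        // 33.333
_ = p.String()       // "33.333%"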
@@ -55,6 +56,7 @@ func NewClassStatistics() *ClassStatistics { Basic: &GnetStatsBasic{}, Queue: &GnetStatsQueue{}, RateEst: &GnetStatsRateEst{}, + BasicHw: &GnetStatsBasic{}, } } diff --git a/vendor/github.com/vishvananda/netlink/class_linux.go b/vendor/github.com/vishvananda/netlink/class_linux.go index 6f542ba4e7..a82eb09de2 100644 --- a/vendor/github.com/vishvananda/netlink/class_linux.go +++ b/vendor/github.com/vishvananda/netlink/class_linux.go @@ -388,6 +388,11 @@ func parseTcStats2(data []byte) (*ClassStatistics, error) { return nil, fmt.Errorf("Failed to parse ClassStatistics.RateEst with: %v\n%s", err, hex.Dump(datum.Value)) } + case nl.TCA_STATS_BASIC_HW: + if err := parseGnetStats(datum.Value, stats.BasicHw); err != nil { + return nil, fmt.Errorf("Failed to parse ClassStatistics.BasicHw with: %v\n%s", + err, hex.Dump(datum.Value)) + } } } diff --git a/vendor/github.com/vishvananda/netlink/filter.go b/vendor/github.com/vishvananda/netlink/filter.go index fe41813359..84e1ca7a49 100644 --- a/vendor/github.com/vishvananda/netlink/filter.go +++ b/vendor/github.com/vishvananda/netlink/filter.go @@ -118,17 +118,32 @@ func (a TcPolAct) String() string { } type ActionAttrs struct { - Index int - Capab int - Action TcAct - Refcnt int - Bindcnt int + Index int + Capab int + Action TcAct + Refcnt int + Bindcnt int + Statistics *ActionStatistic + Timestamp *ActionTimestamp } func (q ActionAttrs) String() string { return fmt.Sprintf("{Index: %d, Capab: %x, Action: %s, Refcnt: %d, Bindcnt: %d}", q.Index, q.Capab, q.Action.String(), q.Refcnt, q.Bindcnt) } +type ActionTimestamp struct { + Installed uint64 + LastUsed uint64 + Expires uint64 + FirstUsed uint64 +} + +func (t ActionTimestamp) String() string { + return fmt.Sprintf("Installed %d LastUsed %d Expires %d FirstUsed %d", t.Installed, t.LastUsed, t.Expires, t.FirstUsed) +} + +type ActionStatistic ClassStatistics + // Action represents an action in any supported filter. 
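A sketch of reading the new hardware counters parsed above from class statistics; the interface name is illustrative and it is assumed that classes expose Statistics via their Attrs():

package main

import (
	"fmt"

	"github.com/vishvananda/netlink"
)

func main() {
	link, err := netlink.LinkByName("eth0") // illustrative interface name
	if err != nil {
		panic(err)
	}
	classes, err := netlink.ClassList(link, netlink.HANDLE_ROOT)
	if err != nil {
		panic(err)
	}
	for _, c := range classes {
		st := c.Attrs().Statistics
		if st != nil && st.Basic != nil && st.BasicHw != nil {
			// BasicHw mirrors Basic but counts traffic handled in hardware (kernel >= 4.20).
			fmt.Printf("%#x: sw=%d B, hw=%d B\n", c.Attrs().Handle, st.Basic.Bytes, st.BasicHw.Bytes)
		}
	}
}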
type Action interface { Attrs() *ActionAttrs @@ -418,3 +433,30 @@ func (filter *GenericFilter) Attrs() *FilterAttrs { func (filter *GenericFilter) Type() string { return filter.FilterType } + +type PeditAction struct { + ActionAttrs + Proto uint8 + SrcMacAddr net.HardwareAddr + DstMacAddr net.HardwareAddr + SrcIP net.IP + DstIP net.IP + SrcPort uint16 + DstPort uint16 +} + +func (p *PeditAction) Attrs() *ActionAttrs { + return &p.ActionAttrs +} + +func (p *PeditAction) Type() string { + return "pedit" +} + +func NewPeditAction() *PeditAction { + return &PeditAction{ + ActionAttrs: ActionAttrs{ + Action: TC_ACT_PIPE, + }, + } +} diff --git a/vendor/github.com/vishvananda/netlink/filter_linux.go b/vendor/github.com/vishvananda/netlink/filter_linux.go index fbd434aa93..536e26825a 100644 --- a/vendor/github.com/vishvananda/netlink/filter_linux.go +++ b/vendor/github.com/vishvananda/netlink/filter_linux.go @@ -528,6 +528,14 @@ func toAttrs(tcgen *nl.TcGen, attrs *ActionAttrs) { attrs.Bindcnt = int(tcgen.Bindcnt) } +func toTimeStamp(tcf *nl.Tcf) *ActionTimestamp { + return &ActionTimestamp{ + Installed: tcf.Install, + LastUsed: tcf.LastUse, + Expires: tcf.Expires, + FirstUsed: tcf.FirstUse} +} + func encodePolice(attr *nl.RtAttr, action *PoliceAction) error { var rtab [256]uint32 var ptab [256]uint32 @@ -692,6 +700,29 @@ func EncodeActions(attr *nl.RtAttr, actions []Action) error { gen := nl.TcGen{} toTcGen(action.Attrs(), &gen) aopts.AddRtAttr(nl.TCA_GACT_PARMS, gen.Serialize()) + case *PeditAction: + table := attr.AddRtAttr(tabIndex, nil) + tabIndex++ + pedit := nl.TcPedit{} + if action.SrcMacAddr != nil { + pedit.SetEthSrc(action.SrcMacAddr) + } + if action.DstMacAddr != nil { + pedit.SetEthDst(action.DstMacAddr) + } + if action.SrcIP != nil { + pedit.SetSrcIP(action.SrcIP) + } + if action.DstIP != nil { + pedit.SetDstIP(action.DstIP) + } + if action.SrcPort != 0 { + pedit.SetSrcPort(action.SrcPort, action.Proto) + } + if action.DstPort != 0 { + pedit.SetDstPort(action.DstPort, action.Proto) + } + pedit.Encode(table) } } return nil @@ -725,6 +756,8 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) { for _, table := range tables { var action Action var actionType string + var actionnStatistic *ActionStatistic + var actionTimestamp *ActionTimestamp aattrs, err := nl.ParseRouteAttr(table.Value) if err != nil { return nil, err @@ -752,6 +785,8 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) { action = &SkbEditAction{} case "police": action = &PoliceAction{} + case "pedit": + action = &PeditAction{} default: break nextattr } @@ -770,7 +805,11 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) { toAttrs(&mirred.TcGen, action.Attrs()) action.(*MirredAction).Ifindex = int(mirred.Ifindex) action.(*MirredAction).MirredAction = MirredAct(mirred.Eaction) + case nl.TCA_MIRRED_TM: + tcTs := nl.DeserializeTcf(adatum.Value) + actionTimestamp = toTimeStamp(tcTs) } + case "tunnel_key": switch adatum.Attr.Type { case nl.TCA_TUNNEL_KEY_PARMS: @@ -786,6 +825,9 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) { action.(*TunnelKeyAction).DstAddr = adatum.Value[:] case nl.TCA_TUNNEL_KEY_ENC_DST_PORT: action.(*TunnelKeyAction).DestPort = ntohs(adatum.Value) + case nl.TCA_TUNNEL_KEY_TM: + tcTs := nl.DeserializeTcf(adatum.Value) + actionTimestamp = toTimeStamp(tcTs) } case "skbedit": switch adatum.Attr.Type { @@ -808,6 +850,9 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) { case 
nl.TCA_SKBEDIT_QUEUE_MAPPING: mapping := native.Uint16(adatum.Value[0:2]) action.(*SkbEditAction).QueueMapping = &mapping + case nl.TCA_SKBEDIT_TM: + tcTs := nl.DeserializeTcf(adatum.Value) + actionTimestamp = toTimeStamp(tcTs) } case "bpf": switch adatum.Attr.Type { @@ -818,6 +863,9 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) { action.(*BpfAction).Fd = int(native.Uint32(adatum.Value[0:4])) case nl.TCA_ACT_BPF_NAME: action.(*BpfAction).Name = string(adatum.Value[:len(adatum.Value)-1]) + case nl.TCA_ACT_BPF_TM: + tcTs := nl.DeserializeTcf(adatum.Value) + actionTimestamp = toTimeStamp(tcTs) } case "connmark": switch adatum.Attr.Type { @@ -826,6 +874,9 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) { action.(*ConnmarkAction).ActionAttrs = ActionAttrs{} toAttrs(&connmark.TcGen, action.Attrs()) action.(*ConnmarkAction).Zone = connmark.Zone + case nl.TCA_CONNMARK_TM: + tcTs := nl.DeserializeTcf(adatum.Value) + actionTimestamp = toTimeStamp(tcTs) } case "csum": switch adatum.Attr.Type { @@ -834,6 +885,9 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) { action.(*CsumAction).ActionAttrs = ActionAttrs{} toAttrs(&csum.TcGen, action.Attrs()) action.(*CsumAction).UpdateFlags = CsumUpdateFlags(csum.UpdateFlags) + case nl.TCA_CSUM_TM: + tcTs := nl.DeserializeTcf(adatum.Value) + actionTimestamp = toTimeStamp(tcTs) } case "gact": switch adatum.Attr.Type { @@ -843,13 +897,24 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) { if action.Attrs().Action.String() == "goto" { action.(*GenericAction).Chain = TC_ACT_EXT_VAL_MASK & gen.Action } + case nl.TCA_GACT_TM: + tcTs := nl.DeserializeTcf(adatum.Value) + actionTimestamp = toTimeStamp(tcTs) } case "police": parsePolice(adatum, action.(*PoliceAction)) } } + case nl.TCA_ACT_STATS: + s, err := parseTcStats2(aattr.Value) + if err != nil { + return nil, err + } + actionnStatistic = (*ActionStatistic)(s) } } + action.Attrs().Statistics = actionnStatistic + action.Attrs().Timestamp = actionTimestamp actions = append(actions, action) } return actions, nil diff --git a/vendor/github.com/vishvananda/netlink/link.go b/vendor/github.com/vishvananda/netlink/link.go index 37f5a5a4cd..e1695916a0 100644 --- a/vendor/github.com/vishvananda/netlink/link.go +++ b/vendor/github.com/vishvananda/netlink/link.go @@ -357,6 +357,46 @@ func (tuntap *Tuntap) Type() string { return "tuntap" } +type NetkitMode uint32 + +const ( + NETKIT_MODE_L2 NetkitMode = iota + NETKIT_MODE_L3 +) + +type NetkitPolicy int + +const ( + NETKIT_POLICY_FORWARD NetkitPolicy = 0 + NETKIT_POLICY_BLACKHOLE NetkitPolicy = 2 +) + +func (n *Netkit) IsPrimary() bool { + return n.isPrimary +} + +// SetPeerAttrs will not take effect if trying to modify an existing netkit device +func (n *Netkit) SetPeerAttrs(Attrs *LinkAttrs) { + n.peerLinkAttrs = *Attrs +} + +type Netkit struct { + LinkAttrs + Mode NetkitMode + Policy NetkitPolicy + PeerPolicy NetkitPolicy + isPrimary bool + peerLinkAttrs LinkAttrs +} + +func (n *Netkit) Attrs() *LinkAttrs { + return &n.LinkAttrs +} + +func (n *Netkit) Type() string { + return "netkit" +} + // Veth devices must specify PeerName on create type Veth struct { LinkAttrs diff --git a/vendor/github.com/vishvananda/netlink/link_linux.go b/vendor/github.com/vishvananda/netlink/link_linux.go index 202c2b74c0..96468c4fab 100644 --- a/vendor/github.com/vishvananda/netlink/link_linux.go +++ b/vendor/github.com/vishvananda/netlink/link_linux.go @@ -1634,6 +1634,10 @@ func (h *Handle) 
linkModify(link Link, flags int) error { if link.VlanProtocol != VLAN_PROTOCOL_UNKNOWN { data.AddRtAttr(nl.IFLA_VLAN_PROTOCOL, htons(uint16(link.VlanProtocol))) } + case *Netkit: + if err := addNetkitAttrs(link, linkInfo, flags); err != nil { + return err + } case *Veth: data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) peer := data.AddRtAttr(nl.VETH_INFO_PEER, nil) @@ -1947,6 +1951,8 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { link = &Bridge{} case "vlan": link = &Vlan{} + case "netkit": + link = &Netkit{} case "veth": link = &Veth{} case "wireguard": @@ -2004,6 +2010,8 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { return nil, err } switch linkType { + case "netkit": + parseNetkitData(link, data) case "vlan": parseVlanData(link, data) case "vxlan": @@ -2544,6 +2552,80 @@ func (h *Handle) LinkSetGroup(link Link, group int) error { return err } +func addNetkitAttrs(nk *Netkit, linkInfo *nl.RtAttr, flag int) error { + if nk.peerLinkAttrs.HardwareAddr != nil || nk.HardwareAddr != nil { + return fmt.Errorf("netkit doesn't support setting Ethernet") + } + + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + // Kernel will return error if trying to change the mode of an existing netkit device + data.AddRtAttr(nl.IFLA_NETKIT_MODE, nl.Uint32Attr(uint32(nk.Mode))) + data.AddRtAttr(nl.IFLA_NETKIT_POLICY, nl.Uint32Attr(uint32(nk.Policy))) + data.AddRtAttr(nl.IFLA_NETKIT_PEER_POLICY, nl.Uint32Attr(uint32(nk.PeerPolicy))) + + if (flag & unix.NLM_F_EXCL) == 0 { + // Modifying peer link attributes will not take effect + return nil + } + + peer := data.AddRtAttr(nl.IFLA_NETKIT_PEER_INFO, nil) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + if nk.peerLinkAttrs.Flags&net.FlagUp != 0 { + msg.Change = unix.IFF_UP + msg.Flags = unix.IFF_UP + } + if nk.peerLinkAttrs.Index != 0 { + msg.Index = int32(nk.peerLinkAttrs.Index) + } + peer.AddChild(msg) + if nk.peerLinkAttrs.Name != "" { + peer.AddRtAttr(unix.IFLA_IFNAME, nl.ZeroTerminated(nk.peerLinkAttrs.Name)) + } + if nk.peerLinkAttrs.MTU > 0 { + peer.AddRtAttr(unix.IFLA_MTU, nl.Uint32Attr(uint32(nk.peerLinkAttrs.MTU))) + } + if nk.peerLinkAttrs.GSOMaxSegs > 0 { + peer.AddRtAttr(unix.IFLA_GSO_MAX_SEGS, nl.Uint32Attr(nk.peerLinkAttrs.GSOMaxSegs)) + } + if nk.peerLinkAttrs.GSOMaxSize > 0 { + peer.AddRtAttr(unix.IFLA_GSO_MAX_SIZE, nl.Uint32Attr(nk.peerLinkAttrs.GSOMaxSize)) + } + if nk.peerLinkAttrs.GSOIPv4MaxSize > 0 { + peer.AddRtAttr(unix.IFLA_GSO_IPV4_MAX_SIZE, nl.Uint32Attr(nk.peerLinkAttrs.GSOIPv4MaxSize)) + } + if nk.peerLinkAttrs.GROIPv4MaxSize > 0 { + peer.AddRtAttr(unix.IFLA_GRO_IPV4_MAX_SIZE, nl.Uint32Attr(nk.peerLinkAttrs.GROIPv4MaxSize)) + } + if nk.peerLinkAttrs.Namespace != nil { + switch ns := nk.peerLinkAttrs.Namespace.(type) { + case NsPid: + peer.AddRtAttr(unix.IFLA_NET_NS_PID, nl.Uint32Attr(uint32(ns))) + case NsFd: + peer.AddRtAttr(unix.IFLA_NET_NS_FD, nl.Uint32Attr(uint32(ns))) + } + } + return nil +} + +func parseNetkitData(link Link, data []syscall.NetlinkRouteAttr) { + netkit := link.(*Netkit) + for _, datum := range data { + switch datum.Attr.Type { + case nl.IFLA_NETKIT_PRIMARY: + isPrimary := datum.Value[0:1][0] + if isPrimary != 0 { + netkit.isPrimary = true + } + case nl.IFLA_NETKIT_MODE: + netkit.Mode = NetkitMode(native.Uint32(datum.Value[0:4])) + case nl.IFLA_NETKIT_POLICY: + netkit.Policy = NetkitPolicy(native.Uint32(datum.Value[0:4])) + case nl.IFLA_NETKIT_PEER_POLICY: + netkit.PeerPolicy = NetkitPolicy(native.Uint32(datum.Value[0:4])) + } + } +} + func parseVlanData(link Link, data 
[]syscall.NetlinkRouteAttr) { vlan := link.(*Vlan) for _, datum := range data { diff --git a/vendor/github.com/vishvananda/netlink/nl/link_linux.go b/vendor/github.com/vishvananda/netlink/nl/link_linux.go index fc04e47f23..6cb118fb5c 100644 --- a/vendor/github.com/vishvananda/netlink/nl/link_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/link_linux.go @@ -31,6 +31,16 @@ const ( IFLA_VLAN_MAX = IFLA_VLAN_PROTOCOL ) +const ( + IFLA_NETKIT_UNSPEC = iota + IFLA_NETKIT_PEER_INFO + IFLA_NETKIT_PRIMARY + IFLA_NETKIT_POLICY + IFLA_NETKIT_PEER_POLICY + IFLA_NETKIT_MODE + IFLA_NETKIT_MAX = IFLA_NETKIT_MODE +) + const ( VETH_INFO_UNSPEC = iota VETH_INFO_PEER diff --git a/vendor/github.com/vishvananda/netlink/nl/nl_linux.go b/vendor/github.com/vishvananda/netlink/nl/nl_linux.go index 663ae48d11..e318e099cc 100644 --- a/vendor/github.com/vishvananda/netlink/nl/nl_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/nl_linux.go @@ -559,6 +559,11 @@ done: if m.Header.Pid != pid { continue } + + if m.Header.Flags&unix.NLM_F_DUMP_INTR != 0 { + return nil, syscall.Errno(unix.EINTR) + } + if m.Header.Type == unix.NLMSG_DONE || m.Header.Type == unix.NLMSG_ERROR { native := NativeEndian() errno := int32(native.Uint32(m.Data[0:4])) @@ -569,7 +574,7 @@ done: err = syscall.Errno(-errno) unreadData := m.Data[4:] - if m.Header.Flags|unix.NLM_F_ACK_TLVS != 0 && len(unreadData) > syscall.SizeofNlMsghdr { + if m.Header.Flags&unix.NLM_F_ACK_TLVS != 0 && len(unreadData) > syscall.SizeofNlMsghdr { // Skip the echoed request message. echoReqH := (*syscall.NlMsghdr)(unsafe.Pointer(&unreadData[0])) unreadData = unreadData[nlmAlignOf(int(echoReqH.Len)):] @@ -661,12 +666,14 @@ func GetNetlinkSocketAt(newNs, curNs netns.NsHandle, protocol int) (*NetlinkSock // In case of success, the caller is expected to execute the returned function // at the end of the code that needs to be executed in the network namespace. // Example: -// func jobAt(...) error { -// d, err := executeInNetns(...) -// if err != nil { return err} -// defer d() -// < code which needs to be executed in specific netns> -// } +// +// func jobAt(...) error { +// d, err := executeInNetns(...) +// if err != nil { return err} +// defer d() +// < code which needs to be executed in specific netns> +// } +// // TODO: his function probably belongs to netns pkg. 
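A minimal sketch of creating a netkit pair with the Netkit link type added above; the device names and the choice of L2 mode are assumptions for illustration:

package main

import "github.com/vishvananda/netlink"

func main() {
	nk := &netlink.Netkit{
		LinkAttrs:  netlink.LinkAttrs{Name: "nk0"}, // primary device (illustrative name)
		Mode:       netlink.NETKIT_MODE_L2,
		Policy:     netlink.NETKIT_POLICY_FORWARD,
		PeerPolicy: netlink.NETKIT_POLICY_BLACKHOLE,
	}
	// Peer attributes only take effect at creation time; they are ignored when
	// modifying an existing netkit device.
	nk.SetPeerAttrs(&netlink.LinkAttrs{Name: "nk1"})
	if err := netlink.LinkAdd(nk); err != nil {
		panic(err)
	}
}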
func executeInNetns(newNs, curNs netns.NsHandle) (func(), error) { var ( diff --git a/vendor/github.com/vishvananda/netlink/nl/tc_linux.go b/vendor/github.com/vishvananda/netlink/nl/tc_linux.go index d8b60c2ed7..e9ce190c77 100644 --- a/vendor/github.com/vishvananda/netlink/nl/tc_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/tc_linux.go @@ -1,8 +1,10 @@ package nl import ( + "bytes" "encoding/binary" "fmt" + "net" "unsafe" "golang.org/x/sys/unix" @@ -87,7 +89,11 @@ const ( TCA_STATS_RATE_EST TCA_STATS_QUEUE TCA_STATS_APP - TCA_STATS_MAX = TCA_STATS_APP + TCA_STATS_RATE_EST64 + TCA_STATS_PAD + TCA_STATS_BASIC_HW + TCA_STATS_PKT64 + TCA_STATS_MAX = TCA_STATS_PKT64 ) const ( @@ -148,6 +154,18 @@ func (x *TcMsg) Serialize() []byte { return (*(*[SizeofTcMsg]byte)(unsafe.Pointer(x)))[:] } +type Tcf struct { + Install uint64 + LastUse uint64 + Expires uint64 + FirstUse uint64 +} + +func DeserializeTcf(b []byte) *Tcf { + const size = int(unsafe.Sizeof(Tcf{})) + return (*Tcf)(unsafe.Pointer(&b[0:size][0])) +} + // struct tcamsg { // unsigned char tca_family; // unsigned char tca__pad1; @@ -1071,14 +1089,14 @@ func (x *TcSfqQopt) Serialize() []byte { return (*(*[SizeofTcSfqQopt]byte)(unsafe.Pointer(x)))[:] } -// struct tc_sfqred_stats { -// __u32 prob_drop; /* Early drops, below max threshold */ -// __u32 forced_drop; /* Early drops, after max threshold */ -// __u32 prob_mark; /* Marked packets, below max threshold */ -// __u32 forced_mark; /* Marked packets, after max threshold */ -// __u32 prob_mark_head; /* Marked packets, below max threshold */ -// __u32 forced_mark_head;/* Marked packets, after max threshold */ -// }; +// struct tc_sfqred_stats { +// __u32 prob_drop; /* Early drops, below max threshold */ +// __u32 forced_drop; /* Early drops, after max threshold */ +// __u32 prob_mark; /* Marked packets, below max threshold */ +// __u32 forced_mark; /* Marked packets, after max threshold */ +// __u32 prob_mark_head; /* Marked packets, below max threshold */ +// __u32 forced_mark_head;/* Marked packets, after max threshold */ +// }; type TcSfqRedStats struct { ProbDrop uint32 ForcedDrop uint32 @@ -1100,22 +1118,26 @@ func (x *TcSfqRedStats) Serialize() []byte { return (*(*[SizeofTcSfqRedStats]byte)(unsafe.Pointer(x)))[:] } -// struct tc_sfq_qopt_v1 { -// struct tc_sfq_qopt v0; -// unsigned int depth; /* max number of packets per flow */ -// unsigned int headdrop; +// struct tc_sfq_qopt_v1 { +// struct tc_sfq_qopt v0; +// unsigned int depth; /* max number of packets per flow */ +// unsigned int headdrop; +// // /* SFQRED parameters */ -// __u32 limit; /* HARD maximal flow queue length (bytes) */ -// __u32 qth_min; /* Min average length threshold (bytes) */ -// __u32 qth_max; /* Max average length threshold (bytes) */ -// unsigned char Wlog; /* log(W) */ -// unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */ -// unsigned char Scell_log; /* cell size for idle damping */ -// unsigned char flags; -// __u32 max_P; /* probability, high resolution */ +// +// __u32 limit; /* HARD maximal flow queue length (bytes) */ +// __u32 qth_min; /* Min average length threshold (bytes) */ +// __u32 qth_max; /* Max average length threshold (bytes) */ +// unsigned char Wlog; /* log(W) */ +// unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */ +// unsigned char Scell_log; /* cell size for idle damping */ +// unsigned char flags; +// __u32 max_P; /* probability, high resolution */ +// // /* SFQRED stats */ -// struct tc_sfqred_stats stats; -// }; +// +// struct tc_sfqred_stats stats; +// }; type 
TcSfqQoptV1 struct { TcSfqQopt Depth uint32 @@ -1175,3 +1197,394 @@ func (i IPProto) String() string { } return fmt.Sprintf("%d", i) } + +const ( + MaxOffs = 128 + SizeOfPeditSel = 24 + SizeOfPeditKey = 24 + + TCA_PEDIT_KEY_EX_HTYPE = 1 + TCA_PEDIT_KEY_EX_CMD = 2 +) + +const ( + TCA_PEDIT_UNSPEC = iota + TCA_PEDIT_TM + TCA_PEDIT_PARMS + TCA_PEDIT_PAD + TCA_PEDIT_PARMS_EX + TCA_PEDIT_KEYS_EX + TCA_PEDIT_KEY_EX +) + +// /* TCA_PEDIT_KEY_EX_HDR_TYPE_NETWROK is a special case for legacy users. It +// * means no specific header type - offset is relative to the network layer +// */ +type PeditHeaderType uint16 + +const ( + TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK = iota + TCA_PEDIT_KEY_EX_HDR_TYPE_ETH + TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 + TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 + TCA_PEDIT_KEY_EX_HDR_TYPE_TCP + TCA_PEDIT_KEY_EX_HDR_TYPE_UDP + __PEDIT_HDR_TYPE_MAX +) + +type PeditCmd uint16 + +const ( + TCA_PEDIT_KEY_EX_CMD_SET = 0 + TCA_PEDIT_KEY_EX_CMD_ADD = 1 +) + +type TcPeditSel struct { + TcGen + NKeys uint8 + Flags uint8 +} + +func DeserializeTcPeditKey(b []byte) *TcPeditKey { + return (*TcPeditKey)(unsafe.Pointer(&b[0:SizeOfPeditKey][0])) +} + +func DeserializeTcPedit(b []byte) (*TcPeditSel, []TcPeditKey) { + x := &TcPeditSel{} + copy((*(*[SizeOfPeditSel]byte)(unsafe.Pointer(x)))[:SizeOfPeditSel], b) + + var keys []TcPeditKey + + next := SizeOfPeditKey + var i uint8 + for i = 0; i < x.NKeys; i++ { + keys = append(keys, *DeserializeTcPeditKey(b[next:])) + next += SizeOfPeditKey + } + + return x, keys +} + +type TcPeditKey struct { + Mask uint32 + Val uint32 + Off uint32 + At uint32 + OffMask uint32 + Shift uint32 +} + +type TcPeditKeyEx struct { + HeaderType PeditHeaderType + Cmd PeditCmd +} + +type TcPedit struct { + Sel TcPeditSel + Keys []TcPeditKey + KeysEx []TcPeditKeyEx + Extend uint8 +} + +func (p *TcPedit) Encode(parent *RtAttr) { + parent.AddRtAttr(TCA_ACT_KIND, ZeroTerminated("pedit")) + actOpts := parent.AddRtAttr(TCA_ACT_OPTIONS, nil) + + bbuf := bytes.NewBuffer(make([]byte, 0, int(unsafe.Sizeof(p.Sel)+unsafe.Sizeof(p.Keys)))) + + bbuf.Write((*(*[SizeOfPeditSel]byte)(unsafe.Pointer(&p.Sel)))[:]) + + for i := uint8(0); i < p.Sel.NKeys; i++ { + bbuf.Write((*(*[SizeOfPeditKey]byte)(unsafe.Pointer(&p.Keys[i])))[:]) + } + actOpts.AddRtAttr(TCA_PEDIT_PARMS_EX, bbuf.Bytes()) + + exAttrs := actOpts.AddRtAttr(int(TCA_PEDIT_KEYS_EX|NLA_F_NESTED), nil) + for i := uint8(0); i < p.Sel.NKeys; i++ { + keyAttr := exAttrs.AddRtAttr(int(TCA_PEDIT_KEY_EX|NLA_F_NESTED), nil) + + htypeBuf := make([]byte, 2) + cmdBuf := make([]byte, 2) + + NativeEndian().PutUint16(htypeBuf, uint16(p.KeysEx[i].HeaderType)) + NativeEndian().PutUint16(cmdBuf, uint16(p.KeysEx[i].Cmd)) + + keyAttr.AddRtAttr(TCA_PEDIT_KEY_EX_HTYPE, htypeBuf) + keyAttr.AddRtAttr(TCA_PEDIT_KEY_EX_CMD, cmdBuf) + } +} + +func (p *TcPedit) SetEthDst(mac net.HardwareAddr) { + u32 := NativeEndian().Uint32(mac) + u16 := NativeEndian().Uint16(mac[4:]) + + tKey := TcPeditKey{} + tKeyEx := TcPeditKeyEx{} + + tKey.Val = u32 + + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_ETH + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + p.Sel.NKeys++ + + tKey = TcPeditKey{} + tKeyEx = TcPeditKeyEx{} + + tKey.Val = uint32(u16) + tKey.Mask = 0xffff0000 + tKey.Off = 4 + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_ETH + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + + p.Sel.NKeys++ +} + +func (p *TcPedit) SetEthSrc(mac net.HardwareAddr) { + u16 := 
NativeEndian().Uint16(mac) + u32 := NativeEndian().Uint32(mac[2:]) + + tKey := TcPeditKey{} + tKeyEx := TcPeditKeyEx{} + + tKey.Val = uint32(u16) << 16 + tKey.Mask = 0x0000ffff + tKey.Off = 4 + + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_ETH + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + p.Sel.NKeys++ + + tKey = TcPeditKey{} + tKeyEx = TcPeditKeyEx{} + + tKey.Val = u32 + tKey.Mask = 0 + tKey.Off = 8 + + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_ETH + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + + p.Sel.NKeys++ +} + +func (p *TcPedit) SetIPv6Src(ip6 net.IP) { + u32 := NativeEndian().Uint32(ip6[:4]) + + tKey := TcPeditKey{} + tKeyEx := TcPeditKeyEx{} + + tKey.Val = u32 + tKey.Off = 8 + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + p.Sel.NKeys++ + + u32 = NativeEndian().Uint32(ip6[4:8]) + tKey = TcPeditKey{} + tKeyEx = TcPeditKeyEx{} + + tKey.Val = u32 + tKey.Off = 12 + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + + p.Sel.NKeys++ + + u32 = NativeEndian().Uint32(ip6[8:12]) + tKey = TcPeditKey{} + tKeyEx = TcPeditKeyEx{} + + tKey.Val = u32 + tKey.Off = 16 + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + + p.Sel.NKeys++ + + u32 = NativeEndian().Uint32(ip6[12:16]) + tKey = TcPeditKey{} + tKeyEx = TcPeditKeyEx{} + + tKey.Val = u32 + tKey.Off = 20 + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + + p.Sel.NKeys++ +} + +func (p *TcPedit) SetDstIP(ip net.IP) { + if ip.To4() != nil { + p.SetIPv4Dst(ip) + } else { + p.SetIPv6Dst(ip) + } +} + +func (p *TcPedit) SetSrcIP(ip net.IP) { + if ip.To4() != nil { + p.SetIPv4Src(ip) + } else { + p.SetIPv6Src(ip) + } +} + +func (p *TcPedit) SetIPv6Dst(ip6 net.IP) { + u32 := NativeEndian().Uint32(ip6[:4]) + + tKey := TcPeditKey{} + tKeyEx := TcPeditKeyEx{} + + tKey.Val = u32 + tKey.Off = 24 + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + p.Sel.NKeys++ + + u32 = NativeEndian().Uint32(ip6[4:8]) + tKey = TcPeditKey{} + tKeyEx = TcPeditKeyEx{} + + tKey.Val = u32 + tKey.Off = 28 + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + + p.Sel.NKeys++ + + u32 = NativeEndian().Uint32(ip6[8:12]) + tKey = TcPeditKey{} + tKeyEx = TcPeditKeyEx{} + + tKey.Val = u32 + tKey.Off = 32 + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + + p.Sel.NKeys++ + + u32 = NativeEndian().Uint32(ip6[12:16]) + tKey = TcPeditKey{} + tKeyEx = TcPeditKeyEx{} + + tKey.Val = u32 + tKey.Off = 36 + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + + p.Sel.NKeys++ +} + +func (p *TcPedit) SetIPv4Src(ip net.IP) { + u32 := NativeEndian().Uint32(ip[:4]) + + 
tKey := TcPeditKey{} + tKeyEx := TcPeditKeyEx{} + + tKey.Val = u32 + tKey.Off = 12 + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + p.Sel.NKeys++ +} + +func (p *TcPedit) SetIPv4Dst(ip net.IP) { + u32 := NativeEndian().Uint32(ip[:4]) + + tKey := TcPeditKey{} + tKeyEx := TcPeditKeyEx{} + + tKey.Val = u32 + tKey.Off = 16 + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + p.Sel.NKeys++ +} + +// SetDstPort only tcp and udp are supported to set port +func (p *TcPedit) SetDstPort(dstPort uint16, protocol uint8) { + tKey := TcPeditKey{} + tKeyEx := TcPeditKeyEx{} + + switch protocol { + case unix.IPPROTO_TCP: + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_TCP + case unix.IPPROTO_UDP: + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_UDP + default: + return + } + + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + tKey.Val = uint32(Swap16(dstPort)) << 16 + tKey.Mask = 0x0000ffff + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + p.Sel.NKeys++ +} + +// SetSrcPort only tcp and udp are supported to set port +func (p *TcPedit) SetSrcPort(srcPort uint16, protocol uint8) { + tKey := TcPeditKey{} + tKeyEx := TcPeditKeyEx{} + + switch protocol { + case unix.IPPROTO_TCP: + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_TCP + case unix.IPPROTO_UDP: + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_UDP + default: + return + } + + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + tKey.Val = uint32(Swap16(srcPort)) + tKey.Mask = 0xffff0000 + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + p.Sel.NKeys++ +} diff --git a/vendor/github.com/vishvananda/netlink/qdisc_linux.go b/vendor/github.com/vishvananda/netlink/qdisc_linux.go index e477bcd6c9..123519b663 100644 --- a/vendor/github.com/vishvananda/netlink/qdisc_linux.go +++ b/vendor/github.com/vishvananda/netlink/qdisc_linux.go @@ -268,6 +268,9 @@ func qdiscPayload(req *nl.NetlinkRequest, qdisc Qdisc) error { if qdisc.Buckets > 0 { options.AddRtAttr(nl.TCA_FQ_BUCKETS_LOG, nl.Uint32Attr((uint32(qdisc.Buckets)))) } + if qdisc.PacketLimit > 0 { + options.AddRtAttr(nl.TCA_FQ_PLIMIT, nl.Uint32Attr((uint32(qdisc.PacketLimit)))) + } if qdisc.LowRateThreshold > 0 { options.AddRtAttr(nl.TCA_FQ_LOW_RATE_THRESHOLD, nl.Uint32Attr((uint32(qdisc.LowRateThreshold)))) } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go index 4e24f9eed6..6ca8d9ad6c 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go @@ -14,17 +14,23 @@ import ( ) // ArrayCodec is the Codec used for bsoncore.Array values. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// ArrayCodec registered. type ArrayCodec struct{} var defaultArrayCodec = NewArrayCodec() // NewArrayCodec returns an ArrayCodec. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// ArrayCodec registered. func NewArrayCodec() *ArrayCodec { return &ArrayCodec{} } // EncodeValue is the ValueEncoder for bsoncore.Array values. 
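A sketch of attaching the pedit action added above to a u32 filter so that matching TCP traffic gets its destination address and port rewritten; the interface, addresses, and ports are made up, and it is assumed that an ingress qdisc is already installed and that a nil u32 selector acts as match-all:

package main

import (
	"net"

	"github.com/vishvananda/netlink"
	"golang.org/x/sys/unix"
)

func main() {
	link, err := netlink.LinkByName("eth0") // illustrative interface name
	if err != nil {
		panic(err)
	}

	pedit := netlink.NewPeditAction()
	pedit.Proto = unix.IPPROTO_TCP
	pedit.DstIP = net.ParseIP("192.0.2.10") // fields left nil/zero are not rewritten
	pedit.DstPort = 8080

	filter := &netlink.U32{
		FilterAttrs: netlink.FilterAttrs{
			LinkIndex: link.Attrs().Index,
			Parent:    netlink.HANDLE_MIN_INGRESS,
			Priority:  1,
			Protocol:  unix.ETH_P_IP,
		},
		Actions: []netlink.Action{pedit},
	}
	if err := netlink.FilterAdd(filter); err != nil {
		panic(err)
	}
}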
-func (ac *ArrayCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +func (ac *ArrayCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tCoreArray { return ValueEncoderError{Name: "CoreArrayEncodeValue", Types: []reflect.Type{tCoreArray}, Received: val} } @@ -34,7 +40,7 @@ func (ac *ArrayCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val r } // DecodeValue is the ValueDecoder for bsoncore.Array values. -func (ac *ArrayCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { +func (ac *ArrayCodec) DecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tCoreArray { return ValueDecoderError{Name: "CoreArrayDecodeValue", Types: []reflect.Type{tCoreArray}, Received: val} } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go index 098ed69f98..0693bd432f 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go @@ -23,6 +23,8 @@ var ( // Marshaler is an interface implemented by types that can marshal themselves // into a BSON document represented as bytes. The bytes returned must be a valid // BSON document if the error is nil. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Marshaler] instead. type Marshaler interface { MarshalBSON() ([]byte, error) } @@ -31,6 +33,8 @@ type Marshaler interface { // themselves into a BSON value as bytes. The type must be the valid type for // the bytes returned. The bytes and byte type together must be valid if the // error is nil. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.ValueMarshaler] instead. type ValueMarshaler interface { MarshalBSONValue() (bsontype.Type, []byte, error) } @@ -39,6 +43,8 @@ type ValueMarshaler interface { // document representation of themselves. The BSON bytes can be assumed to be // valid. UnmarshalBSON must copy the BSON bytes if it wishes to retain the data // after returning. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Unmarshaler] instead. type Unmarshaler interface { UnmarshalBSON([]byte) error } @@ -47,6 +53,8 @@ type Unmarshaler interface { // BSON value representation of themselves. The BSON bytes and type can be // assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it // wishes to retain the data after returning. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.ValueUnmarshaler] instead. type ValueUnmarshaler interface { UnmarshalBSONValue(bsontype.Type, []byte) error } @@ -111,13 +119,93 @@ func (vde ValueDecoderError) Error() string { // value. type EncodeContext struct { *Registry + + // MinSize causes the Encoder to marshal Go integer values (int, int8, int16, int32, int64, + // uint, uint8, uint16, uint32, or uint64) as the minimum BSON int size (either 32 or 64 bits) + // that can represent the integer value. + // + // Deprecated: Use bson.Encoder.IntMinSize instead. MinSize bool + + errorOnInlineDuplicates bool + stringifyMapKeysWithFmt bool + nilMapAsEmpty bool + nilSliceAsEmpty bool + nilByteSliceAsEmpty bool + omitZeroStruct bool + useJSONStructTags bool +} + +// ErrorOnInlineDuplicates causes the Encoder to return an error if there is a duplicate field in +// the marshaled BSON when the "inline" struct tag option is set. 
+// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.ErrorOnInlineDuplicates] instead. +func (ec *EncodeContext) ErrorOnInlineDuplicates() { + ec.errorOnInlineDuplicates = true +} + +// StringifyMapKeysWithFmt causes the Encoder to convert Go map keys to BSON document field name +// strings using fmt.Sprintf() instead of the default string conversion logic. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.StringifyMapKeysWithFmt] instead. +func (ec *EncodeContext) StringifyMapKeysWithFmt() { + ec.stringifyMapKeysWithFmt = true +} + +// NilMapAsEmpty causes the Encoder to marshal nil Go maps as empty BSON documents instead of BSON +// null. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilMapAsEmpty] instead. +func (ec *EncodeContext) NilMapAsEmpty() { + ec.nilMapAsEmpty = true +} + +// NilSliceAsEmpty causes the Encoder to marshal nil Go slices as empty BSON arrays instead of BSON +// null. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilSliceAsEmpty] instead. +func (ec *EncodeContext) NilSliceAsEmpty() { + ec.nilSliceAsEmpty = true +} + +// NilByteSliceAsEmpty causes the Encoder to marshal nil Go byte slices as empty BSON binary values +// instead of BSON null. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilByteSliceAsEmpty] instead. +func (ec *EncodeContext) NilByteSliceAsEmpty() { + ec.nilByteSliceAsEmpty = true +} + +// OmitZeroStruct causes the Encoder to consider the zero value for a struct (e.g. MyStruct{}) +// as empty and omit it from the marshaled BSON when the "omitempty" struct tag option is set. +// +// Note that the Encoder only examines exported struct fields when determining if a struct is the +// zero value. It considers pointers to a zero struct value (e.g. &MyStruct{}) not empty. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.OmitZeroStruct] instead. +func (ec *EncodeContext) OmitZeroStruct() { + ec.omitZeroStruct = true +} + +// UseJSONStructTags causes the Encoder to fall back to using the "json" struct tag if a "bson" +// struct tag is not specified. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.UseJSONStructTags] instead. +func (ec *EncodeContext) UseJSONStructTags() { + ec.useJSONStructTags = true } // DecodeContext is the contextual information required for a Codec to decode a // value. type DecodeContext struct { *Registry + + // Truncate, if true, instructs decoders to to truncate the fractional part of BSON "double" + // values when attempting to unmarshal them into a Go integer (int, int8, int16, int32, int64, + // uint, uint8, uint16, uint32, or uint64) struct field. The truncation logic does not apply to + // BSON "decimal128" values. + // + // Deprecated: Use bson.Decoder.AllowTruncatingDoubles instead. Truncate bool // Ancestor is the type of a containing document. This is mainly used to determine what type @@ -125,7 +213,7 @@ type DecodeContext struct { // Ancestor is a bson.M, BSON embedded document values being decoded into an empty interface // will be decoded into a bson.M. // - // Deprecated: Use DefaultDocumentM or DefaultDocumentD instead. + // Deprecated: Use bson.Decoder.DefaultDocumentM or bson.Decoder.DefaultDocumentD instead. Ancestor reflect.Type // defaultDocumentType specifies the Go type to decode top-level and nested BSON documents into. In particular, the @@ -133,22 +221,74 @@ type DecodeContext struct { // set to a type that a BSON document cannot be unmarshaled into (e.g. 
"string"), unmarshalling will result in an // error. DocumentType overrides the Ancestor field. defaultDocumentType reflect.Type + + binaryAsSlice bool + useJSONStructTags bool + useLocalTimeZone bool + zeroMaps bool + zeroStructs bool } -// DefaultDocumentM will decode empty documents using the primitive.M type. This behavior is restricted to data typed as -// "interface{}" or "map[string]interface{}". +// BinaryAsSlice causes the Decoder to unmarshal BSON binary field values that are the "Generic" or +// "Old" BSON binary subtype as a Go byte slice instead of a primitive.Binary. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.BinaryAsSlice] instead. +func (dc *DecodeContext) BinaryAsSlice() { + dc.binaryAsSlice = true +} + +// UseJSONStructTags causes the Decoder to fall back to using the "json" struct tag if a "bson" +// struct tag is not specified. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseJSONStructTags] instead. +func (dc *DecodeContext) UseJSONStructTags() { + dc.useJSONStructTags = true +} + +// UseLocalTimeZone causes the Decoder to unmarshal time.Time values in the local timezone instead +// of the UTC timezone. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseLocalTimeZone] instead. +func (dc *DecodeContext) UseLocalTimeZone() { + dc.useLocalTimeZone = true +} + +// ZeroMaps causes the Decoder to delete any existing values from Go maps in the destination value +// passed to Decode before unmarshaling BSON documents into them. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroMaps] instead. +func (dc *DecodeContext) ZeroMaps() { + dc.zeroMaps = true +} + +// ZeroStructs causes the Decoder to delete any existing values from Go structs in the destination +// value passed to Decode before unmarshaling BSON documents into them. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroStructs] instead. +func (dc *DecodeContext) ZeroStructs() { + dc.zeroStructs = true +} + +// DefaultDocumentM causes the Decoder to always unmarshal documents into the primitive.M type. This +// behavior is restricted to data typed as "interface{}" or "map[string]interface{}". +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.DefaultDocumentM] instead. func (dc *DecodeContext) DefaultDocumentM() { dc.defaultDocumentType = reflect.TypeOf(primitive.M{}) } -// DefaultDocumentD will decode empty documents using the primitive.D type. This behavior is restricted to data typed as -// "interface{}" or "map[string]interface{}". +// DefaultDocumentD causes the Decoder to always unmarshal documents into the primitive.D type. This +// behavior is restricted to data typed as "interface{}" or "map[string]interface{}". +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.DefaultDocumentD] instead. func (dc *DecodeContext) DefaultDocumentD() { dc.defaultDocumentType = reflect.TypeOf(primitive.D{}) } -// ValueCodec is the interface that groups the methods to encode and decode +// ValueCodec is an interface for encoding and decoding a reflect.Value. // values. +// +// Deprecated: Use [ValueEncoder] and [ValueDecoder] instead. type ValueCodec interface { ValueEncoder ValueDecoder @@ -233,6 +373,10 @@ func decodeTypeOrValueWithInfo(vd ValueDecoder, td typeDecoder, dc DecodeContext // CodecZeroer is the interface implemented by Codecs that can also determine if // a value of the type that would be encoded is zero. 
+// +// Deprecated: Defining custom rules for the zero/empty value will not be supported in Go Driver +// 2.0. Users who want to omit empty complex values should use a pointer field and set the value to +// nil instead. type CodecZeroer interface { IsTypeZero(interface{}) bool } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go index 5a916cc159..dde3e76815 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go @@ -16,18 +16,30 @@ import ( ) // ByteSliceCodec is the Codec used for []byte values. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// ByteSliceCodec registered. type ByteSliceCodec struct { + // EncodeNilAsEmpty causes EncodeValue to marshal nil Go byte slices as empty BSON binary values + // instead of BSON null. + // + // Deprecated: Use bson.Encoder.NilByteSliceAsEmpty instead. EncodeNilAsEmpty bool } var ( defaultByteSliceCodec = NewByteSliceCodec() - _ ValueCodec = defaultByteSliceCodec + // Assert that defaultByteSliceCodec satisfies the typeDecoder interface, which allows it to be + // used by collection type decoders (e.g. map, slice, etc) to set individual values in a + // collection. _ typeDecoder = defaultByteSliceCodec ) -// NewByteSliceCodec returns a StringCodec with options opts. +// NewByteSliceCodec returns a ByteSliceCodec with options opts. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// ByteSliceCodec registered. func NewByteSliceCodec(opts ...*bsonoptions.ByteSliceCodecOptions) *ByteSliceCodec { byteSliceOpt := bsonoptions.MergeByteSliceCodecOptions(opts...) codec := ByteSliceCodec{} @@ -42,13 +54,13 @@ func (bsc *ByteSliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, if !val.IsValid() || val.Type() != tByteSlice { return ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: val} } - if val.IsNil() && !bsc.EncodeNilAsEmpty { + if val.IsNil() && !bsc.EncodeNilAsEmpty && !ec.nilByteSliceAsEmpty { return vw.WriteNull() } return vw.WriteBinary(val.Interface().([]byte)) } -func (bsc *ByteSliceCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (bsc *ByteSliceCodec) decodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tByteSlice { return emptyValue, ValueDecoderError{ Name: "ByteSliceDecodeValue", diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go index e95cab585f..b5e22c498a 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go @@ -48,6 +48,9 @@ func newDefaultStructCodec() *StructCodec { // DefaultValueDecoders is a namespace type for the default ValueDecoders used // when creating a registry. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
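The deprecation notes above and below consistently point callers at bson.NewRegistry; a sketch of that suggested replacement, wiring the default registry into a client, with the URI and context handling as illustrative placeholders:

package main

import (
	"context"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func main() {
	// NewRegistry returns a registry with all default encoders and decoders
	// registered; custom codecs can be added to it before use.
	reg := bson.NewRegistry()

	opts := options.Client().
		ApplyURI("mongodb://localhost:27017"). // illustrative URI
		SetRegistry(reg)

	client, err := mongo.Connect(context.Background(), opts)
	if err != nil {
		panic(err)
	}
	defer client.Disconnect(context.Background())
}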
type DefaultValueDecoders struct{} // RegisterDefaultDecoders will register the decoder methods attached to DefaultValueDecoders with @@ -56,6 +59,9 @@ type DefaultValueDecoders struct{} // There is no support for decoding map[string]interface{} because there is no decoder for // interface{}, so users must either register this decoder themselves or use the // EmptyInterfaceDecoder available in the bson package. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) RegisterDefaultDecoders(rb *RegistryBuilder) { if rb == nil { panic(errors.New("argument to RegisterDefaultDecoders must not be nil")) @@ -132,6 +138,9 @@ func (dvd DefaultValueDecoders) RegisterDefaultDecoders(rb *RegistryBuilder) { } // DDecodeValue is the ValueDecoderFunc for primitive.D instances. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) DDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.IsValid() || !val.CanSet() || val.Type() != tD { return ValueDecoderError{Name: "DDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} @@ -188,7 +197,7 @@ func (dvd DefaultValueDecoders) DDecodeValue(dc DecodeContext, vr bsonrw.ValueRe return nil } -func (dvd DefaultValueDecoders) booleanDecodeType(dctx DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (dvd DefaultValueDecoders) booleanDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t.Kind() != reflect.Bool { return emptyValue, ValueDecoderError{ Name: "BooleanDecodeValue", @@ -235,6 +244,9 @@ func (dvd DefaultValueDecoders) booleanDecodeType(dctx DecodeContext, vr bsonrw. } // BooleanDecodeValue is the ValueDecoderFunc for bool types. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) BooleanDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.IsValid() || !val.CanSet() || val.Kind() != reflect.Bool { return ValueDecoderError{Name: "BooleanDecodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: val} @@ -333,6 +345,9 @@ func (DefaultValueDecoders) intDecodeType(dc DecodeContext, vr bsonrw.ValueReade } // IntDecodeValue is the ValueDecoderFunc for int types. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
func (dvd DefaultValueDecoders) IntDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() { return ValueDecoderError{ @@ -434,7 +449,7 @@ func (dvd DefaultValueDecoders) UintDecodeValue(dc DecodeContext, vr bsonrw.Valu return nil } -func (dvd DefaultValueDecoders) floatDecodeType(ec DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (dvd DefaultValueDecoders) floatDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { var f float64 var err error switch vrType := vr.Type(); vrType { @@ -477,7 +492,7 @@ func (dvd DefaultValueDecoders) floatDecodeType(ec DecodeContext, vr bsonrw.Valu switch t.Kind() { case reflect.Float32: - if !ec.Truncate && float64(float32(f)) != f { + if !dc.Truncate && float64(float32(f)) != f { return emptyValue, errCannotTruncate } @@ -494,6 +509,9 @@ func (dvd DefaultValueDecoders) floatDecodeType(ec DecodeContext, vr bsonrw.Valu } // FloatDecodeValue is the ValueDecoderFunc for float types. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) FloatDecodeValue(ec DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() { return ValueDecoderError{ @@ -515,7 +533,7 @@ func (dvd DefaultValueDecoders) FloatDecodeValue(ec DecodeContext, vr bsonrw.Val // StringDecodeValue is the ValueDecoderFunc for string types. // // Deprecated: StringDecodeValue is not registered by default. Use StringCodec.DecodeValue instead. -func (dvd DefaultValueDecoders) StringDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { +func (dvd DefaultValueDecoders) StringDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { var str string var err error switch vr.Type() { @@ -536,7 +554,7 @@ func (dvd DefaultValueDecoders) StringDecodeValue(dctx DecodeContext, vr bsonrw. return nil } -func (DefaultValueDecoders) javaScriptDecodeType(dctx DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (DefaultValueDecoders) javaScriptDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tJavaScript { return emptyValue, ValueDecoderError{ Name: "JavaScriptDecodeValue", @@ -565,6 +583,9 @@ func (DefaultValueDecoders) javaScriptDecodeType(dctx DecodeContext, vr bsonrw.V } // JavaScriptDecodeValue is the ValueDecoderFunc for the primitive.JavaScript type. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
func (dvd DefaultValueDecoders) JavaScriptDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tJavaScript { return ValueDecoderError{Name: "JavaScriptDecodeValue", Types: []reflect.Type{tJavaScript}, Received: val} @@ -579,7 +600,7 @@ func (dvd DefaultValueDecoders) JavaScriptDecodeValue(dctx DecodeContext, vr bso return nil } -func (DefaultValueDecoders) symbolDecodeType(dctx DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (DefaultValueDecoders) symbolDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tSymbol { return emptyValue, ValueDecoderError{ Name: "SymbolDecodeValue", @@ -620,6 +641,9 @@ func (DefaultValueDecoders) symbolDecodeType(dctx DecodeContext, vr bsonrw.Value } // SymbolDecodeValue is the ValueDecoderFunc for the primitive.Symbol type. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) SymbolDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tSymbol { return ValueDecoderError{Name: "SymbolDecodeValue", Types: []reflect.Type{tSymbol}, Received: val} @@ -634,7 +658,7 @@ func (dvd DefaultValueDecoders) SymbolDecodeValue(dctx DecodeContext, vr bsonrw. return nil } -func (DefaultValueDecoders) binaryDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (DefaultValueDecoders) binaryDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tBinary { return emptyValue, ValueDecoderError{ Name: "BinaryDecodeValue", @@ -664,6 +688,9 @@ func (DefaultValueDecoders) binaryDecodeType(dc DecodeContext, vr bsonrw.ValueRe } // BinaryDecodeValue is the ValueDecoderFunc for Binary. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) BinaryDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tBinary { return ValueDecoderError{Name: "BinaryDecodeValue", Types: []reflect.Type{tBinary}, Received: val} @@ -678,7 +705,7 @@ func (dvd DefaultValueDecoders) BinaryDecodeValue(dc DecodeContext, vr bsonrw.Va return nil } -func (DefaultValueDecoders) undefinedDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (DefaultValueDecoders) undefinedDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tUndefined { return emptyValue, ValueDecoderError{ Name: "UndefinedDecodeValue", @@ -704,6 +731,9 @@ func (DefaultValueDecoders) undefinedDecodeType(dc DecodeContext, vr bsonrw.Valu } // UndefinedDecodeValue is the ValueDecoderFunc for Undefined. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) UndefinedDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tUndefined { return ValueDecoderError{Name: "UndefinedDecodeValue", Types: []reflect.Type{tUndefined}, Received: val} @@ -719,7 +749,7 @@ func (dvd DefaultValueDecoders) UndefinedDecodeValue(dc DecodeContext, vr bsonrw } // Accept both 12-byte string and pretty-printed 24-byte hex string formats. 
-func (dvd DefaultValueDecoders) objectIDDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (dvd DefaultValueDecoders) objectIDDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tOID { return emptyValue, ValueDecoderError{ Name: "ObjectIDDecodeValue", @@ -765,6 +795,9 @@ func (dvd DefaultValueDecoders) objectIDDecodeType(dc DecodeContext, vr bsonrw.V } // ObjectIDDecodeValue is the ValueDecoderFunc for primitive.ObjectID. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) ObjectIDDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tOID { return ValueDecoderError{Name: "ObjectIDDecodeValue", Types: []reflect.Type{tOID}, Received: val} @@ -779,7 +812,7 @@ func (dvd DefaultValueDecoders) ObjectIDDecodeValue(dc DecodeContext, vr bsonrw. return nil } -func (DefaultValueDecoders) dateTimeDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (DefaultValueDecoders) dateTimeDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tDateTime { return emptyValue, ValueDecoderError{ Name: "DateTimeDecodeValue", @@ -808,6 +841,9 @@ func (DefaultValueDecoders) dateTimeDecodeType(dc DecodeContext, vr bsonrw.Value } // DateTimeDecodeValue is the ValueDecoderFunc for DateTime. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) DateTimeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tDateTime { return ValueDecoderError{Name: "DateTimeDecodeValue", Types: []reflect.Type{tDateTime}, Received: val} @@ -822,7 +858,7 @@ func (dvd DefaultValueDecoders) DateTimeDecodeValue(dc DecodeContext, vr bsonrw. return nil } -func (DefaultValueDecoders) nullDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (DefaultValueDecoders) nullDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tNull { return emptyValue, ValueDecoderError{ Name: "NullDecodeValue", @@ -848,6 +884,9 @@ func (DefaultValueDecoders) nullDecodeType(dc DecodeContext, vr bsonrw.ValueRead } // NullDecodeValue is the ValueDecoderFunc for Null. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) NullDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tNull { return ValueDecoderError{Name: "NullDecodeValue", Types: []reflect.Type{tNull}, Received: val} @@ -862,7 +901,7 @@ func (dvd DefaultValueDecoders) NullDecodeValue(dc DecodeContext, vr bsonrw.Valu return nil } -func (DefaultValueDecoders) regexDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (DefaultValueDecoders) regexDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tRegex { return emptyValue, ValueDecoderError{ Name: "RegexDecodeValue", @@ -891,6 +930,9 @@ func (DefaultValueDecoders) regexDecodeType(dc DecodeContext, vr bsonrw.ValueRea } // RegexDecodeValue is the ValueDecoderFunc for Regex. 
+// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) RegexDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tRegex { return ValueDecoderError{Name: "RegexDecodeValue", Types: []reflect.Type{tRegex}, Received: val} @@ -905,7 +947,7 @@ func (dvd DefaultValueDecoders) RegexDecodeValue(dc DecodeContext, vr bsonrw.Val return nil } -func (DefaultValueDecoders) dBPointerDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (DefaultValueDecoders) dBPointerDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tDBPointer { return emptyValue, ValueDecoderError{ Name: "DBPointerDecodeValue", @@ -935,6 +977,9 @@ func (DefaultValueDecoders) dBPointerDecodeType(dc DecodeContext, vr bsonrw.Valu } // DBPointerDecodeValue is the ValueDecoderFunc for DBPointer. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) DBPointerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tDBPointer { return ValueDecoderError{Name: "DBPointerDecodeValue", Types: []reflect.Type{tDBPointer}, Received: val} @@ -949,7 +994,7 @@ func (dvd DefaultValueDecoders) DBPointerDecodeValue(dc DecodeContext, vr bsonrw return nil } -func (DefaultValueDecoders) timestampDecodeType(dc DecodeContext, vr bsonrw.ValueReader, reflectType reflect.Type) (reflect.Value, error) { +func (DefaultValueDecoders) timestampDecodeType(_ DecodeContext, vr bsonrw.ValueReader, reflectType reflect.Type) (reflect.Value, error) { if reflectType != tTimestamp { return emptyValue, ValueDecoderError{ Name: "TimestampDecodeValue", @@ -978,6 +1023,9 @@ func (DefaultValueDecoders) timestampDecodeType(dc DecodeContext, vr bsonrw.Valu } // TimestampDecodeValue is the ValueDecoderFunc for Timestamp. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) TimestampDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tTimestamp { return ValueDecoderError{Name: "TimestampDecodeValue", Types: []reflect.Type{tTimestamp}, Received: val} @@ -992,7 +1040,7 @@ func (dvd DefaultValueDecoders) TimestampDecodeValue(dc DecodeContext, vr bsonrw return nil } -func (DefaultValueDecoders) minKeyDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (DefaultValueDecoders) minKeyDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tMinKey { return emptyValue, ValueDecoderError{ Name: "MinKeyDecodeValue", @@ -1020,6 +1068,9 @@ func (DefaultValueDecoders) minKeyDecodeType(dc DecodeContext, vr bsonrw.ValueRe } // MinKeyDecodeValue is the ValueDecoderFunc for MinKey. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
func (dvd DefaultValueDecoders) MinKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tMinKey { return ValueDecoderError{Name: "MinKeyDecodeValue", Types: []reflect.Type{tMinKey}, Received: val} @@ -1034,7 +1085,7 @@ func (dvd DefaultValueDecoders) MinKeyDecodeValue(dc DecodeContext, vr bsonrw.Va return nil } -func (DefaultValueDecoders) maxKeyDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (DefaultValueDecoders) maxKeyDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tMaxKey { return emptyValue, ValueDecoderError{ Name: "MaxKeyDecodeValue", @@ -1062,6 +1113,9 @@ func (DefaultValueDecoders) maxKeyDecodeType(dc DecodeContext, vr bsonrw.ValueRe } // MaxKeyDecodeValue is the ValueDecoderFunc for MaxKey. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) MaxKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tMaxKey { return ValueDecoderError{Name: "MaxKeyDecodeValue", Types: []reflect.Type{tMaxKey}, Received: val} @@ -1076,7 +1130,7 @@ func (dvd DefaultValueDecoders) MaxKeyDecodeValue(dc DecodeContext, vr bsonrw.Va return nil } -func (dvd DefaultValueDecoders) decimal128DecodeType(dctx DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (dvd DefaultValueDecoders) decimal128DecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tDecimal { return emptyValue, ValueDecoderError{ Name: "Decimal128DecodeValue", @@ -1105,6 +1159,9 @@ func (dvd DefaultValueDecoders) decimal128DecodeType(dctx DecodeContext, vr bson } // Decimal128DecodeValue is the ValueDecoderFunc for primitive.Decimal128. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) Decimal128DecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tDecimal { return ValueDecoderError{Name: "Decimal128DecodeValue", Types: []reflect.Type{tDecimal}, Received: val} @@ -1119,7 +1176,7 @@ func (dvd DefaultValueDecoders) Decimal128DecodeValue(dctx DecodeContext, vr bso return nil } -func (dvd DefaultValueDecoders) jsonNumberDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (dvd DefaultValueDecoders) jsonNumberDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tJSONNumber { return emptyValue, ValueDecoderError{ Name: "JSONNumberDecodeValue", @@ -1164,6 +1221,9 @@ func (dvd DefaultValueDecoders) jsonNumberDecodeType(dc DecodeContext, vr bsonrw } // JSONNumberDecodeValue is the ValueDecoderFunc for json.Number. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
func (dvd DefaultValueDecoders) JSONNumberDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tJSONNumber { return ValueDecoderError{Name: "JSONNumberDecodeValue", Types: []reflect.Type{tJSONNumber}, Received: val} @@ -1178,7 +1238,7 @@ func (dvd DefaultValueDecoders) JSONNumberDecodeValue(dc DecodeContext, vr bsonr return nil } -func (dvd DefaultValueDecoders) urlDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (dvd DefaultValueDecoders) urlDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tURL { return emptyValue, ValueDecoderError{ Name: "URLDecodeValue", @@ -1213,6 +1273,9 @@ func (dvd DefaultValueDecoders) urlDecodeType(dc DecodeContext, vr bsonrw.ValueR } // URLDecodeValue is the ValueDecoderFunc for url.URL. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) URLDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tURL { return ValueDecoderError{Name: "URLDecodeValue", Types: []reflect.Type{tURL}, Received: val} @@ -1230,7 +1293,7 @@ func (dvd DefaultValueDecoders) URLDecodeValue(dc DecodeContext, vr bsonrw.Value // TimeDecodeValue is the ValueDecoderFunc for time.Time. // // Deprecated: TimeDecodeValue is not registered by default. Use TimeCodec.DecodeValue instead. -func (dvd DefaultValueDecoders) TimeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { +func (dvd DefaultValueDecoders) TimeDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if vr.Type() != bsontype.DateTime { return fmt.Errorf("cannot decode %v into a time.Time", vr.Type()) } @@ -1251,7 +1314,7 @@ func (dvd DefaultValueDecoders) TimeDecodeValue(dc DecodeContext, vr bsonrw.Valu // ByteSliceDecodeValue is the ValueDecoderFunc for []byte. // // Deprecated: ByteSliceDecodeValue is not registered by default. Use ByteSliceCodec.DecodeValue instead. -func (dvd DefaultValueDecoders) ByteSliceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { +func (dvd DefaultValueDecoders) ByteSliceDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if vr.Type() != bsontype.Binary && vr.Type() != bsontype.Null { return fmt.Errorf("cannot decode %v into a []byte", vr.Type()) } @@ -1336,6 +1399,9 @@ func (dvd DefaultValueDecoders) MapDecodeValue(dc DecodeContext, vr bsonrw.Value } // ArrayDecodeValue is the ValueDecoderFunc for array types. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) ArrayDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.IsValid() || val.Kind() != reflect.Array { return ValueDecoderError{Name: "ArrayDecodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val} @@ -1447,7 +1513,10 @@ func (dvd DefaultValueDecoders) SliceDecodeValue(dc DecodeContext, vr bsonrw.Val } // ValueUnmarshalerDecodeValue is the ValueDecoderFunc for ValueUnmarshaler implementations. -func (dvd DefaultValueDecoders) ValueUnmarshalerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
+func (dvd DefaultValueDecoders) ValueUnmarshalerDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.IsValid() || (!val.Type().Implements(tValueUnmarshaler) && !reflect.PtrTo(val.Type()).Implements(tValueUnmarshaler)) { return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} } @@ -1480,7 +1549,10 @@ func (dvd DefaultValueDecoders) ValueUnmarshalerDecodeValue(dc DecodeContext, vr } // UnmarshalerDecodeValue is the ValueDecoderFunc for Unmarshaler implementations. -func (dvd DefaultValueDecoders) UnmarshalerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) UnmarshalerDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.IsValid() || (!val.Type().Implements(tUnmarshaler) && !reflect.PtrTo(val.Type()).Implements(tUnmarshaler)) { return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} } @@ -1565,7 +1637,10 @@ func (dvd DefaultValueDecoders) EmptyInterfaceDecodeValue(dc DecodeContext, vr b } // CoreDocumentDecodeValue is the ValueDecoderFunc for bsoncore.Document. -func (DefaultValueDecoders) CoreDocumentDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (DefaultValueDecoders) CoreDocumentDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tCoreDocument { return ValueDecoderError{Name: "CoreDocumentDecodeValue", Types: []reflect.Type{tCoreDocument}, Received: val} } @@ -1671,6 +1746,9 @@ func (dvd DefaultValueDecoders) codeWithScopeDecodeType(dc DecodeContext, vr bso } // CodeWithScopeDecodeValue is the ValueDecoderFunc for CodeWithScope. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) CodeWithScopeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tCodeWithScope { return ValueDecoderError{Name: "CodeWithScopeDecodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go index 6bdb43cb43..7d526c4ef8 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go @@ -58,10 +58,16 @@ func encodeElement(ec EncodeContext, dw bsonrw.DocumentWriter, e primitive.E) er // DefaultValueEncoders is a namespace type for the default ValueEncoders used // when creating a registry. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. type DefaultValueEncoders struct{} // RegisterDefaultEncoders will register the encoder methods attached to DefaultValueEncoders with // the provided RegistryBuilder. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. 
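(Illustrative aside, not part of the patch.) The deprecation notices added throughout default_value_decoders.go and default_value_encoders.go all point callers at bson.NewRegistry instead of DefaultValueEncoders/DefaultValueDecoders plus a RegistryBuilder. A minimal sketch of what that looks like from the caller's side, assuming mongo-driver v1.12+ (MarshalWithRegistry/UnmarshalWithRegistry are used here purely for illustration):

	package main

	import (
		"fmt"

		"go.mongodb.org/mongo-driver/bson"
	)

	func main() {
		// bson.NewRegistry returns a *bsoncodec.Registry that already has every
		// default encoder and decoder registered, so there is no longer any need
		// to call RegisterDefaultEncoders/RegisterDefaultDecoders manually.
		reg := bson.NewRegistry()

		raw, err := bson.MarshalWithRegistry(reg, bson.D{{Key: "hello", Value: "world"}})
		if err != nil {
			panic(err)
		}

		var doc bson.D
		if err := bson.UnmarshalWithRegistry(reg, raw, &doc); err != nil {
			panic(err)
		}
		fmt.Println(doc)
	}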
func (dve DefaultValueEncoders) RegisterDefaultEncoders(rb *RegistryBuilder) { if rb == nil { panic(errors.New("argument to RegisterDefaultEncoders must not be nil")) @@ -113,7 +119,10 @@ func (dve DefaultValueEncoders) RegisterDefaultEncoders(rb *RegistryBuilder) { } // BooleanEncodeValue is the ValueEncoderFunc for bool types. -func (dve DefaultValueEncoders) BooleanEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (dve DefaultValueEncoders) BooleanEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Kind() != reflect.Bool { return ValueEncoderError{Name: "BooleanEncodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: val} } @@ -125,6 +134,9 @@ func fitsIn32Bits(i int64) bool { } // IntEncodeValue is the ValueEncoderFunc for int types. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. func (dve DefaultValueEncoders) IntEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { switch val.Kind() { case reflect.Int8, reflect.Int16, reflect.Int32: @@ -176,7 +188,10 @@ func (dve DefaultValueEncoders) UintEncodeValue(ec EncodeContext, vw bsonrw.Valu } // FloatEncodeValue is the ValueEncoderFunc for float types. -func (dve DefaultValueEncoders) FloatEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (dve DefaultValueEncoders) FloatEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { switch val.Kind() { case reflect.Float32, reflect.Float64: return vw.WriteDouble(val.Float()) @@ -188,7 +203,7 @@ func (dve DefaultValueEncoders) FloatEncodeValue(ec EncodeContext, vw bsonrw.Val // StringEncodeValue is the ValueEncoderFunc for string types. // // Deprecated: StringEncodeValue is not registered by default. Use StringCodec.EncodeValue instead. -func (dve DefaultValueEncoders) StringEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +func (dve DefaultValueEncoders) StringEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if val.Kind() != reflect.String { return ValueEncoderError{ Name: "StringEncodeValue", @@ -201,7 +216,10 @@ func (dve DefaultValueEncoders) StringEncodeValue(ectx EncodeContext, vw bsonrw. } // ObjectIDEncodeValue is the ValueEncoderFunc for primitive.ObjectID. -func (dve DefaultValueEncoders) ObjectIDEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (dve DefaultValueEncoders) ObjectIDEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tOID { return ValueEncoderError{Name: "ObjectIDEncodeValue", Types: []reflect.Type{tOID}, Received: val} } @@ -209,7 +227,10 @@ func (dve DefaultValueEncoders) ObjectIDEncodeValue(ec EncodeContext, vw bsonrw. } // Decimal128EncodeValue is the ValueEncoderFunc for primitive.Decimal128. 
-func (dve DefaultValueEncoders) Decimal128EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (dve DefaultValueEncoders) Decimal128EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tDecimal { return ValueEncoderError{Name: "Decimal128EncodeValue", Types: []reflect.Type{tDecimal}, Received: val} } @@ -217,6 +238,9 @@ func (dve DefaultValueEncoders) Decimal128EncodeValue(ec EncodeContext, vw bsonr } // JSONNumberEncodeValue is the ValueEncoderFunc for json.Number. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. func (dve DefaultValueEncoders) JSONNumberEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tJSONNumber { return ValueEncoderError{Name: "JSONNumberEncodeValue", Types: []reflect.Type{tJSONNumber}, Received: val} @@ -237,7 +261,10 @@ func (dve DefaultValueEncoders) JSONNumberEncodeValue(ec EncodeContext, vw bsonr } // URLEncodeValue is the ValueEncoderFunc for url.URL. -func (dve DefaultValueEncoders) URLEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (dve DefaultValueEncoders) URLEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tURL { return ValueEncoderError{Name: "URLEncodeValue", Types: []reflect.Type{tURL}, Received: val} } @@ -248,7 +275,7 @@ func (dve DefaultValueEncoders) URLEncodeValue(ec EncodeContext, vw bsonrw.Value // TimeEncodeValue is the ValueEncoderFunc for time.TIme. // // Deprecated: TimeEncodeValue is not registered by default. Use TimeCodec.EncodeValue instead. -func (dve DefaultValueEncoders) TimeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +func (dve DefaultValueEncoders) TimeEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tTime { return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val} } @@ -260,7 +287,7 @@ func (dve DefaultValueEncoders) TimeEncodeValue(ec EncodeContext, vw bsonrw.Valu // ByteSliceEncodeValue is the ValueEncoderFunc for []byte. // // Deprecated: ByteSliceEncodeValue is not registered by default. Use ByteSliceCodec.EncodeValue instead. -func (dve DefaultValueEncoders) ByteSliceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +func (dve DefaultValueEncoders) ByteSliceEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tByteSlice { return ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: val} } @@ -343,6 +370,9 @@ func (dve DefaultValueEncoders) mapEncodeValue(ec EncodeContext, dw bsonrw.Docum } // ArrayEncodeValue is the ValueEncoderFunc for array types. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. 
func (dve DefaultValueEncoders) ArrayEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Kind() != reflect.Array { return ValueEncoderError{Name: "ArrayEncodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val} @@ -515,7 +545,10 @@ func (dve DefaultValueEncoders) EmptyInterfaceEncodeValue(ec EncodeContext, vw b } // ValueMarshalerEncodeValue is the ValueEncoderFunc for ValueMarshaler implementations. -func (dve DefaultValueEncoders) ValueMarshalerEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (dve DefaultValueEncoders) ValueMarshalerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { // Either val or a pointer to val must implement ValueMarshaler switch { case !val.IsValid(): @@ -541,7 +574,10 @@ func (dve DefaultValueEncoders) ValueMarshalerEncodeValue(ec EncodeContext, vw b } // MarshalerEncodeValue is the ValueEncoderFunc for Marshaler implementations. -func (dve DefaultValueEncoders) MarshalerEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (dve DefaultValueEncoders) MarshalerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { // Either val or a pointer to val must implement Marshaler switch { case !val.IsValid(): @@ -567,6 +603,9 @@ func (dve DefaultValueEncoders) MarshalerEncodeValue(ec EncodeContext, vw bsonrw } // ProxyEncodeValue is the ValueEncoderFunc for Proxy implementations. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. func (dve DefaultValueEncoders) ProxyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { // Either val or a pointer to val must implement Proxy switch { @@ -603,7 +642,10 @@ func (dve DefaultValueEncoders) ProxyEncodeValue(ec EncodeContext, vw bsonrw.Val } // JavaScriptEncodeValue is the ValueEncoderFunc for the primitive.JavaScript type. -func (DefaultValueEncoders) JavaScriptEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) JavaScriptEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tJavaScript { return ValueEncoderError{Name: "JavaScriptEncodeValue", Types: []reflect.Type{tJavaScript}, Received: val} } @@ -612,7 +654,10 @@ func (DefaultValueEncoders) JavaScriptEncodeValue(ectx EncodeContext, vw bsonrw. } // SymbolEncodeValue is the ValueEncoderFunc for the primitive.Symbol type. -func (DefaultValueEncoders) SymbolEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. 
+func (DefaultValueEncoders) SymbolEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tSymbol { return ValueEncoderError{Name: "SymbolEncodeValue", Types: []reflect.Type{tSymbol}, Received: val} } @@ -621,7 +666,10 @@ func (DefaultValueEncoders) SymbolEncodeValue(ectx EncodeContext, vw bsonrw.Valu } // BinaryEncodeValue is the ValueEncoderFunc for Binary. -func (DefaultValueEncoders) BinaryEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) BinaryEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tBinary { return ValueEncoderError{Name: "BinaryEncodeValue", Types: []reflect.Type{tBinary}, Received: val} } @@ -631,7 +679,10 @@ func (DefaultValueEncoders) BinaryEncodeValue(ec EncodeContext, vw bsonrw.ValueW } // UndefinedEncodeValue is the ValueEncoderFunc for Undefined. -func (DefaultValueEncoders) UndefinedEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) UndefinedEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tUndefined { return ValueEncoderError{Name: "UndefinedEncodeValue", Types: []reflect.Type{tUndefined}, Received: val} } @@ -640,7 +691,10 @@ func (DefaultValueEncoders) UndefinedEncodeValue(ec EncodeContext, vw bsonrw.Val } // DateTimeEncodeValue is the ValueEncoderFunc for DateTime. -func (DefaultValueEncoders) DateTimeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) DateTimeEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tDateTime { return ValueEncoderError{Name: "DateTimeEncodeValue", Types: []reflect.Type{tDateTime}, Received: val} } @@ -649,7 +703,10 @@ func (DefaultValueEncoders) DateTimeEncodeValue(ec EncodeContext, vw bsonrw.Valu } // NullEncodeValue is the ValueEncoderFunc for Null. -func (DefaultValueEncoders) NullEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) NullEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tNull { return ValueEncoderError{Name: "NullEncodeValue", Types: []reflect.Type{tNull}, Received: val} } @@ -658,7 +715,10 @@ func (DefaultValueEncoders) NullEncodeValue(ec EncodeContext, vw bsonrw.ValueWri } // RegexEncodeValue is the ValueEncoderFunc for Regex. -func (DefaultValueEncoders) RegexEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. 
+func (DefaultValueEncoders) RegexEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tRegex { return ValueEncoderError{Name: "RegexEncodeValue", Types: []reflect.Type{tRegex}, Received: val} } @@ -669,7 +729,10 @@ func (DefaultValueEncoders) RegexEncodeValue(ec EncodeContext, vw bsonrw.ValueWr } // DBPointerEncodeValue is the ValueEncoderFunc for DBPointer. -func (DefaultValueEncoders) DBPointerEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) DBPointerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tDBPointer { return ValueEncoderError{Name: "DBPointerEncodeValue", Types: []reflect.Type{tDBPointer}, Received: val} } @@ -680,7 +743,10 @@ func (DefaultValueEncoders) DBPointerEncodeValue(ec EncodeContext, vw bsonrw.Val } // TimestampEncodeValue is the ValueEncoderFunc for Timestamp. -func (DefaultValueEncoders) TimestampEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) TimestampEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tTimestamp { return ValueEncoderError{Name: "TimestampEncodeValue", Types: []reflect.Type{tTimestamp}, Received: val} } @@ -691,7 +757,10 @@ func (DefaultValueEncoders) TimestampEncodeValue(ec EncodeContext, vw bsonrw.Val } // MinKeyEncodeValue is the ValueEncoderFunc for MinKey. -func (DefaultValueEncoders) MinKeyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) MinKeyEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tMinKey { return ValueEncoderError{Name: "MinKeyEncodeValue", Types: []reflect.Type{tMinKey}, Received: val} } @@ -700,7 +769,10 @@ func (DefaultValueEncoders) MinKeyEncodeValue(ec EncodeContext, vw bsonrw.ValueW } // MaxKeyEncodeValue is the ValueEncoderFunc for MaxKey. -func (DefaultValueEncoders) MaxKeyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) MaxKeyEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tMaxKey { return ValueEncoderError{Name: "MaxKeyEncodeValue", Types: []reflect.Type{tMaxKey}, Received: val} } @@ -709,7 +781,10 @@ func (DefaultValueEncoders) MaxKeyEncodeValue(ec EncodeContext, vw bsonrw.ValueW } // CoreDocumentEncodeValue is the ValueEncoderFunc for bsoncore.Document. -func (DefaultValueEncoders) CoreDocumentEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. 
+func (DefaultValueEncoders) CoreDocumentEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tCoreDocument { return ValueEncoderError{Name: "CoreDocumentEncodeValue", Types: []reflect.Type{tCoreDocument}, Received: val} } @@ -720,6 +795,9 @@ func (DefaultValueEncoders) CoreDocumentEncodeValue(ec EncodeContext, vw bsonrw. } // CodeWithScopeEncodeValue is the ValueEncoderFunc for CodeWithScope. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. func (dve DefaultValueEncoders) CodeWithScopeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tCodeWithScope { return ValueEncoderError{Name: "CodeWithScopeEncodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go index 5f903ebea6..4613e5a1ec 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go @@ -31,35 +31,39 @@ // allow the use of a function with the correct signature as a ValueDecoder. A DecodeContext // instance is provided and serves similar functionality to the EncodeContext. // -// # Registry and RegistryBuilder -// -// A Registry is an immutable store for ValueEncoders, ValueDecoders, and a type map. See the Registry type -// documentation for examples of registering various custom encoders and decoders. A Registry can be constructed using a -// RegistryBuilder, which handles three main types of codecs: -// -// 1. Type encoders/decoders - These can be registered using the RegisterTypeEncoder and RegisterTypeDecoder methods. -// The registered codec will be invoked when encoding/decoding a value whose type matches the registered type exactly. -// If the registered type is an interface, the codec will be invoked when encoding or decoding values whose type is the -// interface, but not for values with concrete types that implement the interface. -// -// 2. Hook encoders/decoders - These can be registered using the RegisterHookEncoder and RegisterHookDecoder methods. -// These methods only accept interface types and the registered codecs will be invoked when encoding or decoding values -// whose types implement the interface. An example of a hook defined by the driver is bson.Marshaler. The driver will -// call the MarshalBSON method for any value whose type implements bson.Marshaler, regardless of the value's concrete -// type. -// -// 3. Type map entries - This can be used to associate a BSON type with a Go type. These type associations are used when -// decoding into a bson.D/bson.M or a struct field of type interface{}. For example, by default, BSON int32 and int64 -// values decode as Go int32 and int64 instances, respectively, when decoding into a bson.D. The following code would -// change the behavior so these values decode as Go int instances instead: +// # Registry +// +// A Registry is a store for ValueEncoders, ValueDecoders, and a type map. See the Registry type +// documentation for examples of registering various custom encoders and decoders. A Registry can +// have three main types of codecs: +// +// 1. Type encoders/decoders - These can be registered using the RegisterTypeEncoder and +// RegisterTypeDecoder methods. 
The registered codec will be invoked when encoding/decoding a value +// whose type matches the registered type exactly. +// If the registered type is an interface, the codec will be invoked when encoding or decoding +// values whose type is the interface, but not for values with concrete types that implement the +// interface. +// +// 2. Hook encoders/decoders - These can be registered using the RegisterHookEncoder and +// RegisterHookDecoder methods. These methods only accept interface types and the registered codecs +// will be invoked when encoding or decoding values whose types implement the interface. An example +// of a hook defined by the driver is bson.Marshaler. The driver will call the MarshalBSON method +// for any value whose type implements bson.Marshaler, regardless of the value's concrete type. +// +// 3. Type map entries - This can be used to associate a BSON type with a Go type. These type +// associations are used when decoding into a bson.D/bson.M or a struct field of type interface{}. +// For example, by default, BSON int32 and int64 values decode as Go int32 and int64 instances, +// respectively, when decoding into a bson.D. The following code would change the behavior so these +// values decode as Go int instances instead: // // intType := reflect.TypeOf(int(0)) -// registryBuilder.RegisterTypeMapEntry(bsontype.Int32, intType).RegisterTypeMapEntry(bsontype.Int64, intType) +// registry.RegisterTypeMapEntry(bsontype.Int32, intType).RegisterTypeMapEntry(bsontype.Int64, intType) // -// 4. Kind encoder/decoders - These can be registered using the RegisterDefaultEncoder and RegisterDefaultDecoder -// methods. The registered codec will be invoked when encoding or decoding values whose reflect.Kind matches the -// registered reflect.Kind as long as the value's type doesn't match a registered type or hook encoder/decoder first. -// These methods should be used to change the behavior for all values for a specific kind. +// 4. Kind encoder/decoders - These can be registered using the RegisterDefaultEncoder and +// RegisterDefaultDecoder methods. The registered codec will be invoked when encoding or decoding +// values whose reflect.Kind matches the registered reflect.Kind as long as the value's type doesn't +// match a registered type or hook encoder/decoder first. These methods should be used to change the +// behavior for all values for a specific kind. // // # Registry Lookup Procedure // @@ -67,17 +71,18 @@ // // 1. A type encoder registered for the exact type of the value. // -// 2. A hook encoder registered for an interface that is implemented by the value or by a pointer to the value. If the -// value matches multiple hooks (e.g. the type implements bsoncodec.Marshaler and bsoncodec.ValueMarshaler), the first -// one registered will be selected. Note that registries constructed using bson.NewRegistryBuilder have driver-defined -// hooks registered for the bsoncodec.Marshaler, bsoncodec.ValueMarshaler, and bsoncodec.Proxy interfaces, so those -// will take precedence over any new hooks. +// 2. A hook encoder registered for an interface that is implemented by the value or by a pointer to +// the value. If the value matches multiple hooks (e.g. the type implements bsoncodec.Marshaler and +// bsoncodec.ValueMarshaler), the first one registered will be selected. 
Note that registries +// constructed using bson.NewRegistry have driver-defined hooks registered for the +// bsoncodec.Marshaler, bsoncodec.ValueMarshaler, and bsoncodec.Proxy interfaces, so those will take +// precedence over any new hooks. // // 3. A kind encoder registered for the value's kind. // -// If all of these lookups fail to find an encoder, an error of type ErrNoEncoder is returned. The same precedence -// rules apply for decoders, with the exception that an error of type ErrNoDecoder will be returned if no decoder is -// found. +// If all of these lookups fail to find an encoder, an error of type ErrNoEncoder is returned. The +// same precedence rules apply for decoders, with the exception that an error of type ErrNoDecoder +// will be returned if no decoder is found. // // # DefaultValueEncoders and DefaultValueDecoders // diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go index eda417cff8..94f7dcf1eb 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go @@ -16,18 +16,30 @@ import ( ) // EmptyInterfaceCodec is the Codec used for interface{} values. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// EmptyInterfaceCodec registered. type EmptyInterfaceCodec struct { + // DecodeBinaryAsSlice causes DecodeValue to unmarshal BSON binary field values that are the + // "Generic" or "Old" BSON binary subtype as a Go byte slice instead of a primitive.Binary. + // + // Deprecated: Use bson.Decoder.BinaryAsSlice instead. DecodeBinaryAsSlice bool } var ( defaultEmptyInterfaceCodec = NewEmptyInterfaceCodec() - _ ValueCodec = defaultEmptyInterfaceCodec + // Assert that defaultEmptyInterfaceCodec satisfies the typeDecoder interface, which allows it + // to be used by collection type decoders (e.g. map, slice, etc) to set individual values in a + // collection. _ typeDecoder = defaultEmptyInterfaceCodec ) // NewEmptyInterfaceCodec returns a EmptyInterfaceCodec with options opts. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// EmptyInterfaceCodec registered. func NewEmptyInterfaceCodec(opts ...*bsonoptions.EmptyInterfaceCodecOptions) *EmptyInterfaceCodec { interfaceOpt := bsonoptions.MergeEmptyInterfaceCodecOptions(opts...) @@ -121,7 +133,7 @@ func (eic EmptyInterfaceCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReade return emptyValue, err } - if eic.DecodeBinaryAsSlice && rtype == tBinary { + if (eic.DecodeBinaryAsSlice || dc.binaryAsSlice) && rtype == tBinary { binElem := elem.Interface().(primitive.Binary) if binElem.Subtype == bsontype.BinaryGeneric || binElem.Subtype == bsontype.BinaryBinaryOld { elem = reflect.ValueOf(binElem.Data) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go index e1fbef9c6c..325c1738ab 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go @@ -20,14 +20,29 @@ import ( var defaultMapCodec = NewMapCodec() // MapCodec is the Codec used for map values. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// MapCodec registered. 
type MapCodec struct { - DecodeZerosMap bool - EncodeNilAsEmpty bool + // DecodeZerosMap causes DecodeValue to delete any existing values from Go maps in the destination + // value passed to Decode before unmarshaling BSON documents into them. + // + // Deprecated: Use bson.Decoder.ZeroMaps instead. + DecodeZerosMap bool + + // EncodeNilAsEmpty causes EncodeValue to marshal nil Go maps as empty BSON documents instead of + // BSON null. + // + // Deprecated: Use bson.Encoder.NilMapAsEmpty instead. + EncodeNilAsEmpty bool + + // EncodeKeysWithStringer causes the Encoder to convert Go map keys to BSON document field name + // strings using fmt.Sprintf() instead of the default string conversion logic. + // + // Deprecated: Use bson.Encoder.StringifyMapKeysWithFmt instead. EncodeKeysWithStringer bool } -var _ ValueCodec = &MapCodec{} - // KeyMarshaler is the interface implemented by an object that can marshal itself into a string key. // This applies to types used as map keys and is similar to encoding.TextMarshaler. type KeyMarshaler interface { @@ -45,6 +60,9 @@ type KeyUnmarshaler interface { } // NewMapCodec returns a MapCodec with options opts. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// MapCodec registered. func NewMapCodec(opts ...*bsonoptions.MapCodecOptions) *MapCodec { mapOpt := bsonoptions.MergeMapCodecOptions(opts...) @@ -67,7 +85,7 @@ func (mc *MapCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val ref return ValueEncoderError{Name: "MapEncodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val} } - if val.IsNil() && !mc.EncodeNilAsEmpty { + if val.IsNil() && !mc.EncodeNilAsEmpty && !ec.nilMapAsEmpty { // If we have a nil map but we can't WriteNull, that means we're probably trying to encode // to a TopLevel document. We can't currently tell if this is what actually happened, but if // there's a deeper underlying problem, the error will also be returned from WriteDocument, @@ -100,7 +118,7 @@ func (mc *MapCodec) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, v keys := val.MapKeys() for _, key := range keys { - keyStr, err := mc.encodeKey(key) + keyStr, err := mc.encodeKey(key, ec.stringifyMapKeysWithFmt) if err != nil { return err } @@ -163,7 +181,7 @@ func (mc *MapCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val ref val.Set(reflect.MakeMap(val.Type())) } - if val.Len() > 0 && mc.DecodeZerosMap { + if val.Len() > 0 && (mc.DecodeZerosMap || dc.zeroMaps) { clearMap(val) } @@ -211,8 +229,8 @@ func clearMap(m reflect.Value) { } } -func (mc *MapCodec) encodeKey(val reflect.Value) (string, error) { - if mc.EncodeKeysWithStringer { +func (mc *MapCodec) encodeKey(val reflect.Value, encodeKeysWithStringer bool) (string, error) { + if mc.EncodeKeysWithStringer || encodeKeysWithStringer { return fmt.Sprint(val), nil } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go index 616a3e701b..a1bf9c3e2b 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go @@ -18,6 +18,9 @@ var _ ValueEncoder = &PointerCodec{} var _ ValueDecoder = &PointerCodec{} // PointerCodec is the Codec used for pointers. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// PointerCodec registered. 
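(Illustrative aside, not part of the patch.) The struct-field deprecations above (DecodeBinaryAsSlice, DecodeZerosMap, EncodeNilAsEmpty, EncodeKeysWithStringer) redirect callers to per-Decoder and per-Encoder switches. The method names below come from those deprecation notes; their exact signatures are assumed, so treat this as a rough sketch rather than documented usage:

	package main

	import (
		"bytes"
		"fmt"

		"go.mongodb.org/mongo-driver/bson"
		"go.mongodb.org/mongo-driver/bson/bsonrw"
	)

	func main() {
		raw, _ := bson.Marshal(bson.M{"data": []byte{1, 2, 3}})

		// Decoder-side switches replacing the deprecated codec fields.
		dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(raw))
		if err != nil {
			panic(err)
		}
		dec.BinaryAsSlice() // was EmptyInterfaceCodec.DecodeBinaryAsSlice
		dec.ZeroMaps()      // was MapCodec.DecodeZerosMap

		out := map[string]interface{}{"stale": "value"} // removed by ZeroMaps
		if err := dec.Decode(&out); err != nil {
			panic(err)
		}
		fmt.Printf("%T\n", out["data"]) // []uint8 rather than primitive.Binary

		// Encoder-side switches replacing MapCodec.EncodeNilAsEmpty and
		// MapCodec.EncodeKeysWithStringer.
		buf := new(bytes.Buffer)
		vw, err := bsonrw.NewBSONValueWriter(buf)
		if err != nil {
			panic(err)
		}
		enc, err := bson.NewEncoder(vw)
		if err != nil {
			panic(err)
		}
		enc.NilMapAsEmpty()           // was MapCodec.EncodeNilAsEmpty
		enc.StringifyMapKeysWithFmt() // was MapCodec.EncodeKeysWithStringer

		var nilMap map[int]string
		if err := enc.Encode(bson.M{"m": nilMap}); err != nil {
			panic(err)
		}
		fmt.Println(bson.Raw(buf.Bytes())) // "m" encodes as {} rather than null
	}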
type PointerCodec struct { ecache map[reflect.Type]ValueEncoder dcache map[reflect.Type]ValueDecoder @@ -25,6 +28,9 @@ type PointerCodec struct { } // NewPointerCodec returns a PointerCodec that has been initialized. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// PointerCodec registered. func NewPointerCodec() *PointerCodec { return &PointerCodec{ ecache: make(map[reflect.Type]ValueEncoder), diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go index 80644023c2..930de28490 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go @@ -16,12 +16,18 @@ import ( ) // ErrNilType is returned when nil is passed to either LookupEncoder or LookupDecoder. +// +// Deprecated: ErrNilType will not be supported in Go Driver 2.0. var ErrNilType = errors.New("cannot perform a decoder lookup on ") // ErrNotPointer is returned when a non-pointer type is provided to LookupDecoder. +// +// Deprecated: ErrNotPointer will not be supported in Go Driver 2.0. var ErrNotPointer = errors.New("non-pointer provided to LookupDecoder") // ErrNoEncoder is returned when there wasn't an encoder available for a type. +// +// Deprecated: ErrNoEncoder will not be supported in Go Driver 2.0. type ErrNoEncoder struct { Type reflect.Type } @@ -34,6 +40,8 @@ func (ene ErrNoEncoder) Error() string { } // ErrNoDecoder is returned when there wasn't a decoder available for a type. +// +// Deprecated: ErrNoDecoder will not be supported in Go Driver 2.0. type ErrNoDecoder struct { Type reflect.Type } @@ -43,6 +51,8 @@ func (end ErrNoDecoder) Error() string { } // ErrNoTypeMapEntry is returned when there wasn't a type available for the provided BSON type. +// +// Deprecated: ErrNoTypeMapEntry will not be supported in Go Driver 2.0. type ErrNoTypeMapEntry struct { Type bsontype.Type } @@ -52,63 +62,30 @@ func (entme ErrNoTypeMapEntry) Error() string { } // ErrNotInterface is returned when the provided type is not an interface. +// +// Deprecated: ErrNotInterface will not be supported in Go Driver 2.0. var ErrNotInterface = errors.New("The provided type is not an interface") // A RegistryBuilder is used to build a Registry. This type is not goroutine // safe. +// +// Deprecated: Use Registry instead. type RegistryBuilder struct { - typeEncoders map[reflect.Type]ValueEncoder - interfaceEncoders []interfaceValueEncoder - kindEncoders map[reflect.Kind]ValueEncoder - - typeDecoders map[reflect.Type]ValueDecoder - interfaceDecoders []interfaceValueDecoder - kindDecoders map[reflect.Kind]ValueDecoder - - typeMap map[bsontype.Type]reflect.Type -} - -// A Registry is used to store and retrieve codecs for types and interfaces. This type is the main -// typed passed around and Encoders and Decoders are constructed from it. -type Registry struct { - typeEncoders map[reflect.Type]ValueEncoder - typeDecoders map[reflect.Type]ValueDecoder - - interfaceEncoders []interfaceValueEncoder - interfaceDecoders []interfaceValueDecoder - - kindEncoders map[reflect.Kind]ValueEncoder - kindDecoders map[reflect.Kind]ValueDecoder - - typeMap map[bsontype.Type]reflect.Type - - mu sync.RWMutex + registry *Registry } // NewRegistryBuilder creates a new empty RegistryBuilder. +// +// Deprecated: Use NewRegistry instead. 
func NewRegistryBuilder() *RegistryBuilder { return &RegistryBuilder{ - typeEncoders: make(map[reflect.Type]ValueEncoder), - typeDecoders: make(map[reflect.Type]ValueDecoder), - - interfaceEncoders: make([]interfaceValueEncoder, 0), - interfaceDecoders: make([]interfaceValueDecoder, 0), - - kindEncoders: make(map[reflect.Kind]ValueEncoder), - kindDecoders: make(map[reflect.Kind]ValueDecoder), - - typeMap: make(map[bsontype.Type]reflect.Type), + registry: NewRegistry(), } } -func buildDefaultRegistry() *Registry { - rb := NewRegistryBuilder() - defaultValueEncoders.RegisterDefaultEncoders(rb) - defaultValueDecoders.RegisterDefaultDecoders(rb) - return rb.Build() -} - // RegisterCodec will register the provided ValueCodec for the provided type. +// +// Deprecated: Use Registry.RegisterTypeEncoder and Registry.RegisterTypeDecoder instead. func (rb *RegistryBuilder) RegisterCodec(t reflect.Type, codec ValueCodec) *RegistryBuilder { rb.RegisterTypeEncoder(t, codec) rb.RegisterTypeDecoder(t, codec) @@ -120,31 +97,22 @@ func (rb *RegistryBuilder) RegisterCodec(t reflect.Type, codec ValueCodec) *Regi // The type will be used directly, so an encoder can be registered for a type and a different encoder can be registered // for a pointer to that type. // -// If the given type is an interface, the encoder will be called when marshalling a type that is that interface. It -// will not be called when marshalling a non-interface type that implements the interface. +// If the given type is an interface, the encoder will be called when marshaling a type that is that interface. It +// will not be called when marshaling a non-interface type that implements the interface. +// +// Deprecated: Use Registry.RegisterTypeEncoder instead. func (rb *RegistryBuilder) RegisterTypeEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder { - rb.typeEncoders[t] = enc + rb.registry.RegisterTypeEncoder(t, enc) return rb } // RegisterHookEncoder will register an encoder for the provided interface type t. This encoder will be called when -// marshalling a type if the type implements t or a pointer to the type implements t. If the provided type is not +// marshaling a type if the type implements t or a pointer to the type implements t. If the provided type is not // an interface (i.e. t.Kind() != reflect.Interface), this method will panic. +// +// Deprecated: Use Registry.RegisterInterfaceEncoder instead. func (rb *RegistryBuilder) RegisterHookEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder { - if t.Kind() != reflect.Interface { - panicStr := fmt.Sprintf("RegisterHookEncoder expects a type with kind reflect.Interface, "+ - "got type %s with kind %s", t, t.Kind()) - panic(panicStr) - } - - for idx, encoder := range rb.interfaceEncoders { - if encoder.i == t { - rb.interfaceEncoders[idx].ve = enc - return rb - } - } - - rb.interfaceEncoders = append(rb.interfaceEncoders, interfaceValueEncoder{i: t, ve: enc}) + rb.registry.RegisterInterfaceEncoder(t, enc) return rb } @@ -153,97 +121,78 @@ func (rb *RegistryBuilder) RegisterHookEncoder(t reflect.Type, enc ValueEncoder) // The type will be used directly, so a decoder can be registered for a type and a different decoder can be registered // for a pointer to that type. // -// If the given type is an interface, the decoder will be called when unmarshalling into a type that is that interface. -// It will not be called when unmarshalling into a non-interface type that implements the interface. 
+// If the given type is an interface, the decoder will be called when unmarshaling into a type that is that interface. +// It will not be called when unmarshaling into a non-interface type that implements the interface. +// +// Deprecated: Use Registry.RegisterTypeDecoder instead. func (rb *RegistryBuilder) RegisterTypeDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder { - rb.typeDecoders[t] = dec + rb.registry.RegisterTypeDecoder(t, dec) return rb } // RegisterHookDecoder will register an decoder for the provided interface type t. This decoder will be called when -// unmarshalling into a type if the type implements t or a pointer to the type implements t. If the provided type is not +// unmarshaling into a type if the type implements t or a pointer to the type implements t. If the provided type is not // an interface (i.e. t.Kind() != reflect.Interface), this method will panic. +// +// Deprecated: Use Registry.RegisterInterfaceDecoder instead. func (rb *RegistryBuilder) RegisterHookDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder { - if t.Kind() != reflect.Interface { - panicStr := fmt.Sprintf("RegisterHookDecoder expects a type with kind reflect.Interface, "+ - "got type %s with kind %s", t, t.Kind()) - panic(panicStr) - } - - for idx, decoder := range rb.interfaceDecoders { - if decoder.i == t { - rb.interfaceDecoders[idx].vd = dec - return rb - } - } - - rb.interfaceDecoders = append(rb.interfaceDecoders, interfaceValueDecoder{i: t, vd: dec}) + rb.registry.RegisterInterfaceDecoder(t, dec) return rb } // RegisterEncoder registers the provided type and encoder pair. // -// Deprecated: Use RegisterTypeEncoder or RegisterHookEncoder instead. +// Deprecated: Use Registry.RegisterTypeEncoder or Registry.RegisterInterfaceEncoder instead. func (rb *RegistryBuilder) RegisterEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder { if t == tEmpty { - rb.typeEncoders[t] = enc + rb.registry.RegisterTypeEncoder(t, enc) return rb } switch t.Kind() { case reflect.Interface: - for idx, ir := range rb.interfaceEncoders { - if ir.i == t { - rb.interfaceEncoders[idx].ve = enc - return rb - } - } - - rb.interfaceEncoders = append(rb.interfaceEncoders, interfaceValueEncoder{i: t, ve: enc}) + rb.registry.RegisterInterfaceEncoder(t, enc) default: - rb.typeEncoders[t] = enc + rb.registry.RegisterTypeEncoder(t, enc) } return rb } // RegisterDecoder registers the provided type and decoder pair. // -// Deprecated: Use RegisterTypeDecoder or RegisterHookDecoder instead. +// Deprecated: Use Registry.RegisterTypeDecoder or Registry.RegisterInterfaceDecoder instead. func (rb *RegistryBuilder) RegisterDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder { if t == nil { - rb.typeDecoders[nil] = dec + rb.registry.RegisterTypeDecoder(t, dec) return rb } if t == tEmpty { - rb.typeDecoders[t] = dec + rb.registry.RegisterTypeDecoder(t, dec) return rb } switch t.Kind() { case reflect.Interface: - for idx, ir := range rb.interfaceDecoders { - if ir.i == t { - rb.interfaceDecoders[idx].vd = dec - return rb - } - } - - rb.interfaceDecoders = append(rb.interfaceDecoders, interfaceValueDecoder{i: t, vd: dec}) + rb.registry.RegisterInterfaceDecoder(t, dec) default: - rb.typeDecoders[t] = dec + rb.registry.RegisterTypeDecoder(t, dec) } return rb } -// RegisterDefaultEncoder will registr the provided ValueEncoder to the provided +// RegisterDefaultEncoder will register the provided ValueEncoder to the provided // kind. +// +// Deprecated: Use Registry.RegisterKindEncoder instead. 
func (rb *RegistryBuilder) RegisterDefaultEncoder(kind reflect.Kind, enc ValueEncoder) *RegistryBuilder { - rb.kindEncoders[kind] = enc + rb.registry.RegisterKindEncoder(kind, enc) return rb } // RegisterDefaultDecoder will register the provided ValueDecoder to the // provided kind. +// +// Deprecated: Use Registry.RegisterKindDecoder instead. func (rb *RegistryBuilder) RegisterDefaultDecoder(kind reflect.Kind, dec ValueDecoder) *RegistryBuilder { - rb.kindDecoders[kind] = dec + rb.registry.RegisterKindDecoder(kind, dec) return rb } @@ -256,120 +205,277 @@ func (rb *RegistryBuilder) RegisterDefaultDecoder(kind reflect.Kind, dec ValueDe // to decode to bson.Raw, use the following code: // // rb.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{})) +// +// Deprecated: Use Registry.RegisterTypeMapEntry instead. func (rb *RegistryBuilder) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) *RegistryBuilder { - rb.typeMap[bt] = rt + rb.registry.RegisterTypeMapEntry(bt, rt) return rb } // Build creates a Registry from the current state of this RegistryBuilder. +// +// Deprecated: Use NewRegistry instead. func (rb *RegistryBuilder) Build() *Registry { registry := new(Registry) - registry.typeEncoders = make(map[reflect.Type]ValueEncoder) - for t, enc := range rb.typeEncoders { + registry.typeEncoders = make(map[reflect.Type]ValueEncoder, len(rb.registry.typeEncoders)) + for t, enc := range rb.registry.typeEncoders { registry.typeEncoders[t] = enc } - registry.typeDecoders = make(map[reflect.Type]ValueDecoder) - for t, dec := range rb.typeDecoders { + registry.typeDecoders = make(map[reflect.Type]ValueDecoder, len(rb.registry.typeDecoders)) + for t, dec := range rb.registry.typeDecoders { registry.typeDecoders[t] = dec } - registry.interfaceEncoders = make([]interfaceValueEncoder, len(rb.interfaceEncoders)) - copy(registry.interfaceEncoders, rb.interfaceEncoders) + registry.interfaceEncoders = make([]interfaceValueEncoder, len(rb.registry.interfaceEncoders)) + copy(registry.interfaceEncoders, rb.registry.interfaceEncoders) - registry.interfaceDecoders = make([]interfaceValueDecoder, len(rb.interfaceDecoders)) - copy(registry.interfaceDecoders, rb.interfaceDecoders) + registry.interfaceDecoders = make([]interfaceValueDecoder, len(rb.registry.interfaceDecoders)) + copy(registry.interfaceDecoders, rb.registry.interfaceDecoders) registry.kindEncoders = make(map[reflect.Kind]ValueEncoder) - for kind, enc := range rb.kindEncoders { + for kind, enc := range rb.registry.kindEncoders { registry.kindEncoders[kind] = enc } registry.kindDecoders = make(map[reflect.Kind]ValueDecoder) - for kind, dec := range rb.kindDecoders { + for kind, dec := range rb.registry.kindDecoders { registry.kindDecoders[kind] = dec } registry.typeMap = make(map[bsontype.Type]reflect.Type) - for bt, rt := range rb.typeMap { + for bt, rt := range rb.registry.typeMap { registry.typeMap[bt] = rt } return registry } -// LookupEncoder inspects the registry for an encoder for the given type. The lookup precedence works as follows: +// A Registry is used to store and retrieve codecs for types and interfaces. This type is the main +// typed passed around and Encoders and Decoders are constructed from it. 
+type Registry struct { + typeEncoders map[reflect.Type]ValueEncoder + typeDecoders map[reflect.Type]ValueDecoder + + interfaceEncoders []interfaceValueEncoder + interfaceDecoders []interfaceValueDecoder + + kindEncoders map[reflect.Kind]ValueEncoder + kindDecoders map[reflect.Kind]ValueDecoder + + typeMap map[bsontype.Type]reflect.Type + + mu sync.RWMutex +} + +// NewRegistry creates a new empty Registry. +func NewRegistry() *Registry { + return &Registry{ + typeEncoders: make(map[reflect.Type]ValueEncoder), + typeDecoders: make(map[reflect.Type]ValueDecoder), + + interfaceEncoders: make([]interfaceValueEncoder, 0), + interfaceDecoders: make([]interfaceValueDecoder, 0), + + kindEncoders: make(map[reflect.Kind]ValueEncoder), + kindDecoders: make(map[reflect.Kind]ValueDecoder), + + typeMap: make(map[bsontype.Type]reflect.Type), + } +} + +// RegisterTypeEncoder registers the provided ValueEncoder for the provided type. +// +// The type will be used as provided, so an encoder can be registered for a type and a different +// encoder can be registered for a pointer to that type. +// +// If the given type is an interface, the encoder will be called when marshaling a type that is +// that interface. It will not be called when marshaling a non-interface type that implements the +// interface. To get the latter behavior, call RegisterHookEncoder instead. +// +// RegisterTypeEncoder should not be called concurrently with any other Registry method. +func (r *Registry) RegisterTypeEncoder(valueType reflect.Type, enc ValueEncoder) { + r.typeEncoders[valueType] = enc +} + +// RegisterTypeDecoder registers the provided ValueDecoder for the provided type. +// +// The type will be used as provided, so a decoder can be registered for a type and a different +// decoder can be registered for a pointer to that type. +// +// If the given type is an interface, the decoder will be called when unmarshaling into a type that +// is that interface. It will not be called when unmarshaling into a non-interface type that +// implements the interface. To get the latter behavior, call RegisterHookDecoder instead. +// +// RegisterTypeDecoder should not be called concurrently with any other Registry method. +func (r *Registry) RegisterTypeDecoder(valueType reflect.Type, dec ValueDecoder) { + r.typeDecoders[valueType] = dec +} + +// RegisterKindEncoder registers the provided ValueEncoder for the provided kind. +// +// Use RegisterKindEncoder to register an encoder for any type with the same underlying kind. For +// example, consider the type MyInt defined as +// +// type MyInt int32 +// +// To define an encoder for MyInt and int32, use RegisterKindEncoder like +// +// reg.RegisterKindEncoder(reflect.Int32, myEncoder) +// +// RegisterKindEncoder should not be called concurrently with any other Registry method. +func (r *Registry) RegisterKindEncoder(kind reflect.Kind, enc ValueEncoder) { + r.kindEncoders[kind] = enc +} + +// RegisterKindDecoder registers the provided ValueDecoder for the provided kind. +// +// Use RegisterKindDecoder to register a decoder for any type with the same underlying kind. For +// example, consider the type MyInt defined as // -// 1. An encoder registered for the exact type. If the given type represents an interface, an encoder registered using -// RegisterTypeEncoder for the interface will be selected. +// type MyInt int32 // -// 2. An encoder registered using RegisterHookEncoder for an interface implemented by the type or by a pointer to the -// type. 
+// To define an decoder for MyInt and int32, use RegisterKindDecoder like // -// 3. An encoder registered for the reflect.Kind of the value. +// reg.RegisterKindDecoder(reflect.Int32, myDecoder) // -// If no encoder is found, an error of type ErrNoEncoder is returned. -func (r *Registry) LookupEncoder(t reflect.Type) (ValueEncoder, error) { - encodererr := ErrNoEncoder{Type: t} +// RegisterKindDecoder should not be called concurrently with any other Registry method. +func (r *Registry) RegisterKindDecoder(kind reflect.Kind, dec ValueDecoder) { + r.kindDecoders[kind] = dec +} + +// RegisterInterfaceEncoder registers an encoder for the provided interface type iface. This encoder will +// be called when marshaling a type if the type implements iface or a pointer to the type +// implements iface. If the provided type is not an interface +// (i.e. iface.Kind() != reflect.Interface), this method will panic. +// +// RegisterInterfaceEncoder should not be called concurrently with any other Registry method. +func (r *Registry) RegisterInterfaceEncoder(iface reflect.Type, enc ValueEncoder) { + if iface.Kind() != reflect.Interface { + panicStr := fmt.Errorf("RegisterInterfaceEncoder expects a type with kind reflect.Interface, "+ + "got type %s with kind %s", iface, iface.Kind()) + panic(panicStr) + } + + for idx, encoder := range r.interfaceEncoders { + if encoder.i == iface { + r.interfaceEncoders[idx].ve = enc + return + } + } + + r.interfaceEncoders = append(r.interfaceEncoders, interfaceValueEncoder{i: iface, ve: enc}) +} + +// RegisterInterfaceDecoder registers an decoder for the provided interface type iface. This decoder will +// be called when unmarshaling into a type if the type implements iface or a pointer to the type +// implements iface. If the provided type is not an interface (i.e. iface.Kind() != reflect.Interface), +// this method will panic. +// +// RegisterInterfaceDecoder should not be called concurrently with any other Registry method. +func (r *Registry) RegisterInterfaceDecoder(iface reflect.Type, dec ValueDecoder) { + if iface.Kind() != reflect.Interface { + panicStr := fmt.Errorf("RegisterInterfaceDecoder expects a type with kind reflect.Interface, "+ + "got type %s with kind %s", iface, iface.Kind()) + panic(panicStr) + } + + for idx, decoder := range r.interfaceDecoders { + if decoder.i == iface { + r.interfaceDecoders[idx].vd = dec + return + } + } + + r.interfaceDecoders = append(r.interfaceDecoders, interfaceValueDecoder{i: iface, vd: dec}) +} + +// RegisterTypeMapEntry will register the provided type to the BSON type. The primary usage for this +// mapping is decoding situations where an empty interface is used and a default type needs to be +// created and decoded into. +// +// By default, BSON documents will decode into interface{} values as bson.D. To change the default type for BSON +// documents, a type map entry for bsontype.EmbeddedDocument should be registered. For example, to force BSON documents +// to decode to bson.Raw, use the following code: +// +// reg.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{})) +func (r *Registry) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) { + r.typeMap[bt] = rt +} + +// LookupEncoder returns the first matching encoder in the Registry. It uses the following lookup +// order: +// +// 1. An encoder registered for the exact type. If the given type is an interface, an encoder +// registered using RegisterTypeEncoder for that interface will be selected. +// +// 2. 
An encoder registered using RegisterInterfaceEncoder for an interface implemented by the type +// or by a pointer to the type. +// +// 3. An encoder registered using RegisterKindEncoder for the kind of value. +// +// If no encoder is found, an error of type ErrNoEncoder is returned. LookupEncoder is safe for +// concurrent use by multiple goroutines after all codecs and encoders are registered. +func (r *Registry) LookupEncoder(valueType reflect.Type) (ValueEncoder, error) { r.mu.RLock() - enc, found := r.lookupTypeEncoder(t) + enc, found := r.lookupTypeEncoder(valueType) r.mu.RUnlock() if found { if enc == nil { - return nil, ErrNoEncoder{Type: t} + return nil, ErrNoEncoder{Type: valueType} } return enc, nil } - enc, found = r.lookupInterfaceEncoder(t, true) + enc, found = r.lookupInterfaceEncoder(valueType, true) if found { r.mu.Lock() - r.typeEncoders[t] = enc + r.typeEncoders[valueType] = enc r.mu.Unlock() return enc, nil } - if t == nil { + if valueType == nil { r.mu.Lock() - r.typeEncoders[t] = nil + r.typeEncoders[valueType] = nil r.mu.Unlock() - return nil, encodererr + return nil, ErrNoEncoder{Type: valueType} } - enc, found = r.kindEncoders[t.Kind()] + enc, found = r.kindEncoders[valueType.Kind()] if !found { r.mu.Lock() - r.typeEncoders[t] = nil + r.typeEncoders[valueType] = nil r.mu.Unlock() - return nil, encodererr + return nil, ErrNoEncoder{Type: valueType} } r.mu.Lock() - r.typeEncoders[t] = enc + r.typeEncoders[valueType] = enc r.mu.Unlock() return enc, nil } -func (r *Registry) lookupTypeEncoder(t reflect.Type) (ValueEncoder, bool) { - enc, found := r.typeEncoders[t] +func (r *Registry) lookupTypeEncoder(valueType reflect.Type) (ValueEncoder, bool) { + enc, found := r.typeEncoders[valueType] return enc, found } -func (r *Registry) lookupInterfaceEncoder(t reflect.Type, allowAddr bool) (ValueEncoder, bool) { - if t == nil { +func (r *Registry) lookupInterfaceEncoder(valueType reflect.Type, allowAddr bool) (ValueEncoder, bool) { + if valueType == nil { return nil, false } for _, ienc := range r.interfaceEncoders { - if t.Implements(ienc.i) { + if valueType.Implements(ienc.i) { return ienc.ve, true } - if allowAddr && t.Kind() != reflect.Ptr && reflect.PtrTo(t).Implements(ienc.i) { - // if *t implements an interface, this will catch if t implements an interface further ahead - // in interfaceEncoders - defaultEnc, found := r.lookupInterfaceEncoder(t, false) + if allowAddr && valueType.Kind() != reflect.Ptr && reflect.PtrTo(valueType).Implements(ienc.i) { + // if *t implements an interface, this will catch if t implements an interface further + // ahead in interfaceEncoders + defaultEnc, found := r.lookupInterfaceEncoder(valueType, false) if !found { - defaultEnc = r.kindEncoders[t.Kind()] + defaultEnc = r.kindEncoders[valueType.Kind()] } return newCondAddrEncoder(ienc.ve, defaultEnc), true } @@ -377,70 +483,72 @@ func (r *Registry) lookupInterfaceEncoder(t reflect.Type, allowAddr bool) (Value return nil, false } -// LookupDecoder inspects the registry for an decoder for the given type. The lookup precedence works as follows: +// LookupDecoder returns the first matching decoder in the Registry. It uses the following lookup +// order: // -// 1. A decoder registered for the exact type. If the given type represents an interface, a decoder registered using -// RegisterTypeDecoder for the interface will be selected. +// 1. A decoder registered for the exact type. 
If the given type is an interface, a decoder +// registered using RegisterTypeDecoder for that interface will be selected. // -// 2. A decoder registered using RegisterHookDecoder for an interface implemented by the type or by a pointer to the -// type. +// 2. A decoder registered using RegisterInterfaceDecoder for an interface implemented by the type or by +// a pointer to the type. // -// 3. A decoder registered for the reflect.Kind of the value. +// 3. A decoder registered using RegisterKindDecoder for the kind of value. // -// If no decoder is found, an error of type ErrNoDecoder is returned. -func (r *Registry) LookupDecoder(t reflect.Type) (ValueDecoder, error) { - if t == nil { +// If no decoder is found, an error of type ErrNoDecoder is returned. LookupDecoder is safe for +// concurrent use by multiple goroutines after all codecs and decoders are registered. +func (r *Registry) LookupDecoder(valueType reflect.Type) (ValueDecoder, error) { + if valueType == nil { return nil, ErrNilType } - decodererr := ErrNoDecoder{Type: t} + decodererr := ErrNoDecoder{Type: valueType} r.mu.RLock() - dec, found := r.lookupTypeDecoder(t) + dec, found := r.lookupTypeDecoder(valueType) r.mu.RUnlock() if found { if dec == nil { - return nil, ErrNoDecoder{Type: t} + return nil, ErrNoDecoder{Type: valueType} } return dec, nil } - dec, found = r.lookupInterfaceDecoder(t, true) + dec, found = r.lookupInterfaceDecoder(valueType, true) if found { r.mu.Lock() - r.typeDecoders[t] = dec + r.typeDecoders[valueType] = dec r.mu.Unlock() return dec, nil } - dec, found = r.kindDecoders[t.Kind()] + dec, found = r.kindDecoders[valueType.Kind()] if !found { r.mu.Lock() - r.typeDecoders[t] = nil + r.typeDecoders[valueType] = nil r.mu.Unlock() return nil, decodererr } r.mu.Lock() - r.typeDecoders[t] = dec + r.typeDecoders[valueType] = dec r.mu.Unlock() return dec, nil } -func (r *Registry) lookupTypeDecoder(t reflect.Type) (ValueDecoder, bool) { - dec, found := r.typeDecoders[t] +func (r *Registry) lookupTypeDecoder(valueType reflect.Type) (ValueDecoder, bool) { + dec, found := r.typeDecoders[valueType] return dec, found } -func (r *Registry) lookupInterfaceDecoder(t reflect.Type, allowAddr bool) (ValueDecoder, bool) { +func (r *Registry) lookupInterfaceDecoder(valueType reflect.Type, allowAddr bool) (ValueDecoder, bool) { for _, idec := range r.interfaceDecoders { - if t.Implements(idec.i) { + if valueType.Implements(idec.i) { return idec.vd, true } - if allowAddr && t.Kind() != reflect.Ptr && reflect.PtrTo(t).Implements(idec.i) { - // if *t implements an interface, this will catch if t implements an interface further ahead - // in interfaceDecoders - defaultDec, found := r.lookupInterfaceDecoder(t, false) + if allowAddr && valueType.Kind() != reflect.Ptr && reflect.PtrTo(valueType).Implements(idec.i) { + // if *t implements an interface, this will catch if t implements an interface further + // ahead in interfaceDecoders + defaultDec, found := r.lookupInterfaceDecoder(valueType, false) if !found { - defaultDec = r.kindDecoders[t.Kind()] + defaultDec = r.kindDecoders[valueType.Kind()] } return newCondAddrDecoder(idec.vd, defaultDec), true } @@ -450,6 +558,8 @@ func (r *Registry) lookupInterfaceDecoder(t reflect.Type, allowAddr bool) (Value // LookupTypeMapEntry inspects the registry's type map for a Go type for the corresponding BSON // type. If no type is found, ErrNoTypeMapEntry is returned. +// +// LookupTypeMapEntry should not be called concurrently with any other Registry method. 
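Editor's note: LookupEncoder and LookupDecoder now document a fixed precedence (exact type, then registered interface, then kind) and cache what they find, while RegisterTypeMapEntry controls which Go type a BSON document decodes into when the destination is interface{}. A small sketch of that usage, assuming bson.NewRegistry (named in the deprecation notes) returns a registry pre-populated with the default codecs:

package main

import (
	"fmt"
	"reflect"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsontype"
)

func main() {
	reg := bson.NewRegistry()

	// Decode BSON documents into bson.Raw rather than bson.D when the
	// destination is interface{}, per the RegisterTypeMapEntry example above.
	reg.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{}))

	// Lookup walks exact type, then registered interfaces, then kind.
	enc, err := reg.LookupEncoder(reflect.TypeOf(int32(0)))
	if err != nil {
		panic(err)
	}
	_ = enc // resolved through the kind-encoder fallback for reflect.Int32

	rt, err := reg.LookupTypeMapEntry(bsontype.EmbeddedDocument)
	if err != nil {
		panic(err)
	}
	fmt.Println(rt) // bson.Raw
}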
func (r *Registry) LookupTypeMapEntry(bt bsontype.Type) (reflect.Type, error) { t, ok := r.typeMap[bt] if !ok || t == nil { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go index 3c1b6b860a..20c3e7549c 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go @@ -19,13 +19,21 @@ import ( var defaultSliceCodec = NewSliceCodec() // SliceCodec is the Codec used for slice values. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// SliceCodec registered. type SliceCodec struct { + // EncodeNilAsEmpty causes EncodeValue to marshal nil Go slices as empty BSON arrays instead of + // BSON null. + // + // Deprecated: Use bson.Encoder.NilSliceAsEmpty instead. EncodeNilAsEmpty bool } -var _ ValueCodec = &MapCodec{} - // NewSliceCodec returns a MapCodec with options opts. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// SliceCodec registered. func NewSliceCodec(opts ...*bsonoptions.SliceCodecOptions) *SliceCodec { sliceOpt := bsonoptions.MergeSliceCodecOptions(opts...) @@ -42,16 +50,14 @@ func (sc SliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val re return ValueEncoderError{Name: "SliceEncodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} } - if val.IsNil() && !sc.EncodeNilAsEmpty { + if val.IsNil() && !sc.EncodeNilAsEmpty && !ec.nilSliceAsEmpty { return vw.WriteNull() } // If we have a []byte we want to treat it as a binary instead of as an array. if val.Type().Elem() == tByte { - var byteSlice []byte - for idx := 0; idx < val.Len(); idx++ { - byteSlice = append(byteSlice, val.Index(idx).Interface().(byte)) - } + byteSlice := make([]byte, val.Len()) + reflect.Copy(reflect.ValueOf(byteSlice), val) return vw.WriteBinary(byteSlice) } @@ -145,11 +151,8 @@ func (sc *SliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val r if val.IsNil() { val.Set(reflect.MakeSlice(val.Type(), 0, len(data))) } - val.SetLen(0) - for _, elem := range data { - val.Set(reflect.Append(val, reflect.ValueOf(elem))) - } + val.Set(reflect.AppendSlice(val, reflect.ValueOf(data))) return nil case bsontype.String: if sliceType := val.Type().Elem(); sliceType != tByte { @@ -164,11 +167,8 @@ func (sc *SliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val r if val.IsNil() { val.Set(reflect.MakeSlice(val.Type(), 0, len(byteStr))) } - val.SetLen(0) - for _, elem := range byteStr { - val.Set(reflect.Append(val, reflect.ValueOf(elem))) - } + val.Set(reflect.AppendSlice(val, reflect.ValueOf(byteStr))) return nil default: return fmt.Errorf("cannot decode %v into a slice", vrType) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go index 5332b7c3b5..ff931b7253 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go @@ -15,26 +15,38 @@ import ( "go.mongodb.org/mongo-driver/bson/bsontype" ) -// StringCodec is the Codec used for struct values. +// StringCodec is the Codec used for string values. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// StringCodec registered. 
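Editor's note: SliceCodec.EncodeNilAsEmpty is deprecated in favor of the per-Encoder toggle the note names. A sketch of the new configuration, assuming the bsonrw.NewBSONValueWriter constructor and bson.NewEncoder from the same driver version (the doc type is illustrative):

package main

import (
	"bytes"
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

func main() {
	buf := new(bytes.Buffer)
	vw, err := bsonrw.NewBSONValueWriter(buf)
	if err != nil {
		panic(err)
	}
	enc, err := bson.NewEncoder(vw)
	if err != nil {
		panic(err)
	}
	// Replacement for SliceCodec{EncodeNilAsEmpty: true}: the toggle now lives
	// on the Encoder, as the deprecation note above states.
	enc.NilSliceAsEmpty()

	type doc struct {
		Tags []string `bson:"tags"`
	}
	if err := enc.Encode(doc{Tags: nil}); err != nil {
		panic(err)
	}
	fmt.Println(bson.Raw(buf.Bytes())) // {"tags": []} instead of {"tags": null}
}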
type StringCodec struct { + // DecodeObjectIDAsHex specifies if object IDs should be decoded as their hex representation. + // If false, a string made from the raw object ID bytes will be used. Defaults to true. + // + // Deprecated: Decoding object IDs as raw bytes will not be supported in Go Driver 2.0. DecodeObjectIDAsHex bool } var ( defaultStringCodec = NewStringCodec() - _ ValueCodec = defaultStringCodec + // Assert that defaultStringCodec satisfies the typeDecoder interface, which allows it to be + // used by collection type decoders (e.g. map, slice, etc) to set individual values in a + // collection. _ typeDecoder = defaultStringCodec ) // NewStringCodec returns a StringCodec with options opts. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// StringCodec registered. func NewStringCodec(opts ...*bsonoptions.StringCodecOptions) *StringCodec { stringOpt := bsonoptions.MergeStringCodecOptions(opts...) return &StringCodec{*stringOpt.DecodeObjectIDAsHex} } // EncodeValue is the ValueEncoder for string types. -func (sc *StringCodec) EncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +func (sc *StringCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if val.Kind() != reflect.String { return ValueEncoderError{ Name: "StringEncodeValue", @@ -46,7 +58,7 @@ func (sc *StringCodec) EncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, va return vw.WriteString(val.String()) } -func (sc *StringCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (sc *StringCodec) decodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t.Kind() != reflect.String { return emptyValue, ValueDecoderError{ Name: "StringDecodeValue", @@ -71,6 +83,7 @@ func (sc *StringCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t ref if sc.DecodeObjectIDAsHex { str = oid.Hex() } else { + // TODO(GODRIVER-2796): Return an error here instead of decoding to a garbled string. byteArray := [12]byte(oid) str = string(byteArray[:]) } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go index da1ae18e02..1dfdd98865 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go @@ -59,14 +59,44 @@ type Zeroer interface { } // StructCodec is the Codec used for struct values. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// StructCodec registered. type StructCodec struct { - cache map[reflect.Type]*structDescription - l sync.RWMutex - parser StructTagParser - DecodeZeroStruct bool - DecodeDeepZeroInline bool - EncodeOmitDefaultStruct bool - AllowUnexportedFields bool + cache map[reflect.Type]*structDescription + l sync.RWMutex + parser StructTagParser + + // DecodeZeroStruct causes DecodeValue to delete any existing values from Go structs in the + // destination value passed to Decode before unmarshaling BSON documents into them. + // + // Deprecated: Use bson.Decoder.ZeroStructs instead. + DecodeZeroStruct bool + + // DecodeDeepZeroInline causes DecodeValue to delete any existing values from Go structs in the + // destination value passed to Decode before unmarshaling BSON documents into them. + // + // Deprecated: DecodeDeepZeroInline will not be supported in Go Driver 2.0. 
+ DecodeDeepZeroInline bool + + // EncodeOmitDefaultStruct causes the Encoder to consider the zero value for a struct (e.g. + // MyStruct{}) as empty and omit it from the marshaled BSON when the "omitempty" struct tag + // option is set. + // + // Deprecated: Use bson.Encoder.OmitZeroStruct instead. + EncodeOmitDefaultStruct bool + + // AllowUnexportedFields allows encoding and decoding values from un-exported struct fields. + // + // Deprecated: AllowUnexportedFields does not work on recent versions of Go and will not be + // supported in Go Driver 2.0. + AllowUnexportedFields bool + + // OverwriteDuplicatedInlinedFields, if false, causes EncodeValue to return an error if there is + // a duplicate field in the marshaled BSON when the "inline" struct tag option is set. The + // default value is true. + // + // Deprecated: Use bson.Encoder.ErrorOnInlineDuplicates instead. OverwriteDuplicatedInlinedFields bool } @@ -74,6 +104,9 @@ var _ ValueEncoder = &StructCodec{} var _ ValueDecoder = &StructCodec{} // NewStructCodec returns a StructCodec that uses p for struct tag parsing. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// StructCodec registered. func NewStructCodec(p StructTagParser, opts ...*bsonoptions.StructCodecOptions) (*StructCodec, error) { if p == nil { return nil, errors.New("a StructTagParser must be provided to NewStructCodec") @@ -106,12 +139,12 @@ func NewStructCodec(p StructTagParser, opts ...*bsonoptions.StructCodecOptions) } // EncodeValue handles encoding generic struct types. -func (sc *StructCodec) EncodeValue(r EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +func (sc *StructCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Kind() != reflect.Struct { return ValueEncoderError{Name: "StructCodec.EncodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val} } - sd, err := sc.describeStruct(r.Registry, val.Type()) + sd, err := sc.describeStruct(ec.Registry, val.Type(), ec.useJSONStructTags, ec.errorOnInlineDuplicates) if err != nil { return err } @@ -131,7 +164,7 @@ func (sc *StructCodec) EncodeValue(r EncodeContext, vw bsonrw.ValueWriter, val r } } - desc.encoder, rv, err = defaultValueEncoders.lookupElementEncoder(r, desc.encoder, rv) + desc.encoder, rv, err = defaultValueEncoders.lookupElementEncoder(ec, desc.encoder, rv) if err != nil && err != errInvalidValue { return err @@ -158,17 +191,18 @@ func (sc *StructCodec) EncodeValue(r EncodeContext, vw bsonrw.ValueWriter, val r encoder := desc.encoder - var isZero bool + var zero bool rvInterface := rv.Interface() if cz, ok := encoder.(CodecZeroer); ok { - isZero = cz.IsTypeZero(rvInterface) + zero = cz.IsTypeZero(rvInterface) } else if rv.Kind() == reflect.Interface { - // sc.isZero will not treat an interface rv as an interface, so we need to check for the zero interface separately. - isZero = rv.IsNil() + // isZero will not treat an interface rv as an interface, so we need to check for the + // zero interface separately. 
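Editor's note: the struct codec fields deprecated above map onto per-Encoder and per-Decoder toggles named in the notes (OmitZeroStruct, ErrorOnInlineDuplicates, ZeroStructs). A sketch of configuring them directly; the payload/meta types are illustrative, and the bsonrw constructors NewBSONValueWriter and NewBSONDocumentReader are assumed from the same driver version:

package main

import (
	"bytes"
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

type meta struct {
	Region string `bson:"region"`
}

type payload struct {
	Name string `bson:"name"`
	Meta meta   `bson:"meta,omitempty"`
}

func main() {
	buf := new(bytes.Buffer)
	vw, err := bsonrw.NewBSONValueWriter(buf)
	if err != nil {
		panic(err)
	}
	enc, err := bson.NewEncoder(vw)
	if err != nil {
		panic(err)
	}
	enc.OmitZeroStruct()          // was StructCodec.EncodeOmitDefaultStruct
	enc.ErrorOnInlineDuplicates() // was StructCodec.OverwriteDuplicatedInlinedFields = false

	if err := enc.Encode(payload{Name: "demo"}); err != nil {
		panic(err)
	}
	fmt.Println(bson.Raw(buf.Bytes())) // zero Meta struct is omitted

	dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(buf.Bytes()))
	if err != nil {
		panic(err)
	}
	dec.ZeroStructs() // was StructCodec.DecodeZeroStruct
	var out payload
	if err := dec.Decode(&out); err != nil {
		panic(err)
	}
}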
+ zero = rv.IsNil() } else { - isZero = sc.isZero(rvInterface) + zero = isZero(rvInterface, sc.EncodeOmitDefaultStruct || ec.omitZeroStruct) } - if desc.omitEmpty && isZero { + if desc.omitEmpty && zero { continue } @@ -177,7 +211,17 @@ func (sc *StructCodec) EncodeValue(r EncodeContext, vw bsonrw.ValueWriter, val r return err } - ectx := EncodeContext{Registry: r.Registry, MinSize: desc.minSize} + ectx := EncodeContext{ + Registry: ec.Registry, + MinSize: desc.minSize || ec.MinSize, + errorOnInlineDuplicates: ec.errorOnInlineDuplicates, + stringifyMapKeysWithFmt: ec.stringifyMapKeysWithFmt, + nilMapAsEmpty: ec.nilMapAsEmpty, + nilSliceAsEmpty: ec.nilSliceAsEmpty, + nilByteSliceAsEmpty: ec.nilByteSliceAsEmpty, + omitZeroStruct: ec.omitZeroStruct, + useJSONStructTags: ec.useJSONStructTags, + } err = encoder.EncodeValue(ectx, vw2, rv) if err != nil { return err @@ -191,7 +235,7 @@ func (sc *StructCodec) EncodeValue(r EncodeContext, vw bsonrw.ValueWriter, val r return exists } - return defaultMapCodec.mapEncodeValue(r, dw, rv, collisionFn) + return defaultMapCodec.mapEncodeValue(ec, dw, rv, collisionFn) } return dw.WriteDocumentEnd() @@ -213,7 +257,7 @@ func newDecodeError(key string, original error) error { // DecodeValue implements the Codec interface. // By default, map types in val will not be cleared. If a map has existing key/value pairs, it will be extended with the new ones from vr. // For slices, the decoder will set the length of the slice to zero and append all elements. The underlying array will not be cleared. -func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { +func (sc *StructCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Kind() != reflect.Struct { return ValueDecoderError{Name: "StructCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val} } @@ -238,12 +282,12 @@ func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val r return fmt.Errorf("cannot decode %v into a %s", vrType, val.Type()) } - sd, err := sc.describeStruct(r.Registry, val.Type()) + sd, err := sc.describeStruct(dc.Registry, val.Type(), dc.useJSONStructTags, false) if err != nil { return err } - if sc.DecodeZeroStruct { + if sc.DecodeZeroStruct || dc.zeroStructs { val.Set(reflect.Zero(val.Type())) } if sc.DecodeDeepZeroInline && sd.inline { @@ -254,7 +298,7 @@ func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val r var inlineMap reflect.Value if sd.inlineMap >= 0 { inlineMap = val.Field(sd.inlineMap) - decoder, err = r.LookupDecoder(inlineMap.Type().Elem()) + decoder, err = dc.LookupDecoder(inlineMap.Type().Elem()) if err != nil { return err } @@ -298,8 +342,8 @@ func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val r } elem := reflect.New(inlineMap.Type().Elem()).Elem() - r.Ancestor = inlineMap.Type() - err = decoder.DecodeValue(r, vr, elem) + dc.Ancestor = inlineMap.Type() + err = decoder.DecodeValue(dc, vr, elem) if err != nil { return err } @@ -327,9 +371,14 @@ func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val r field = field.Addr() dctx := DecodeContext{ - Registry: r.Registry, - Truncate: fd.truncate || r.Truncate, - defaultDocumentType: r.defaultDocumentType, + Registry: dc.Registry, + Truncate: fd.truncate || dc.Truncate, + defaultDocumentType: dc.defaultDocumentType, + binaryAsSlice: dc.binaryAsSlice, + useJSONStructTags: dc.useJSONStructTags, + useLocalTimeZone: 
dc.useLocalTimeZone, + zeroMaps: dc.zeroMaps, + zeroStructs: dc.zeroStructs, } if fd.decoder == nil { @@ -345,7 +394,7 @@ func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val r return nil } -func (sc *StructCodec) isZero(i interface{}) bool { +func isZero(i interface{}, omitZeroStruct bool) bool { v := reflect.ValueOf(i) // check the value validity @@ -371,22 +420,27 @@ func (sc *StructCodec) isZero(i interface{}) bool { case reflect.Interface, reflect.Ptr: return v.IsNil() case reflect.Struct: - if sc.EncodeOmitDefaultStruct { - vt := v.Type() - if vt == tTime { - return v.Interface().(time.Time).IsZero() + if !omitZeroStruct { + return false + } + + // TODO(GODRIVER-2820): Update the logic to be able to handle private struct fields. + // TODO Use condition "reflect.Zero(v.Type()).Equal(v)" instead. + + vt := v.Type() + if vt == tTime { + return v.Interface().(time.Time).IsZero() + } + for i := 0; i < v.NumField(); i++ { + if vt.Field(i).PkgPath != "" && !vt.Field(i).Anonymous { + continue // Private field } - for i := 0; i < v.NumField(); i++ { - if vt.Field(i).PkgPath != "" && !vt.Field(i).Anonymous { - continue // Private field - } - fld := v.Field(i) - if !sc.isZero(fld.Interface()) { - return false - } + fld := v.Field(i) + if !isZero(fld.Interface(), omitZeroStruct) { + return false } - return true } + return true } return false @@ -440,7 +494,12 @@ func (bi byIndex) Less(i, j int) bool { return len(bi[i].inline) < len(bi[j].inline) } -func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescription, error) { +func (sc *StructCodec) describeStruct( + r *Registry, + t reflect.Type, + useJSONStructTags bool, + errorOnDuplicates bool, +) (*structDescription, error) { // We need to analyze the struct, including getting the tags, collecting // information about inlining, and create a map of the field name to the field. sc.l.RLock() @@ -482,7 +541,14 @@ func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescr decoder: decoder, } - stags, err := sc.parser.ParseStructTags(sf) + var stags StructTags + // If the caller requested that we use JSON struct tags, use the JSONFallbackStructTagParser + // instead of the parser defined on the codec. 
+ if useJSONStructTags { + stags, err = JSONFallbackStructTagParser.ParseStructTags(sf) + } else { + stags, err = sc.parser.ParseStructTags(sf) + } if err != nil { return nil, err } @@ -512,7 +578,7 @@ func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescr } fallthrough case reflect.Struct: - inlinesf, err := sc.describeStruct(r, sfType) + inlinesf, err := sc.describeStruct(r, sfType, useJSONStructTags, errorOnDuplicates) if err != nil { return nil, err } @@ -564,7 +630,7 @@ func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescr continue } dominant, ok := dominantField(fields[i : i+advance]) - if !ok || !sc.OverwriteDuplicatedInlinedFields { + if !ok || !sc.OverwriteDuplicatedInlinedFields || errorOnDuplicates { return nil, fmt.Errorf("struct %s has duplicated key %s", t.String(), name) } sd.fl = append(sd.fl, dominant) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go index 62708c5c74..18d85bfb03 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go @@ -12,12 +12,16 @@ import ( ) // StructTagParser returns the struct tags for a given struct field. +// +// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0. type StructTagParser interface { ParseStructTags(reflect.StructField) (StructTags, error) } // StructTagParserFunc is an adapter that allows a generic function to be used // as a StructTagParser. +// +// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0. type StructTagParserFunc func(reflect.StructField) (StructTags, error) // ParseStructTags implements the StructTagParser interface. @@ -50,7 +54,7 @@ func (stpf StructTagParserFunc) ParseStructTags(sf reflect.StructField) (StructT // Skip This struct field should be skipped. This is usually denoted by parsing a "-" // for the name. // -// TODO(skriptble): Add tags for undefined as nil and for null as nil. +// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0. type StructTags struct { Name string OmitEmpty bool @@ -85,6 +89,8 @@ type StructTags struct { // A struct tag either consisting entirely of '-' or with a bson key with a // value consisting entirely of '-' will return a StructTags with Skip true and // the remaining fields will be their default values. +// +// Deprecated: DefaultStructTagParser will be removed in Go Driver 2.0. var DefaultStructTagParser StructTagParserFunc = func(sf reflect.StructField) (StructTags, error) { key := strings.ToLower(sf.Name) tag, ok := sf.Tag.Lookup("bson") @@ -125,6 +131,9 @@ func parseTags(key string, tag string) (StructTags, error) { // JSONFallbackStructTagParser has the same behavior as DefaultStructTagParser // but will also fallback to parsing the json tag instead on a field where the // bson tag isn't available. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.UseJSONStructTags] and +// [go.mongodb.org/mongo-driver/bson.Decoder.UseJSONStructTags] instead. 
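Editor's note: JSONFallbackStructTagParser's replacement is the UseJSONStructTags toggle on bson.Encoder and bson.Decoder, as the deprecation note says. A sketch of the encoder side (the event type is illustrative; bsonrw.NewBSONValueWriter is assumed from the same driver version):

package main

import (
	"bytes"
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

type event struct {
	// Only a json tag is present; with UseJSONStructTags the encoder falls
	// back to it, producing the key "display_name" instead of "displayname".
	DisplayName string `json:"display_name"`
}

func main() {
	buf := new(bytes.Buffer)
	vw, err := bsonrw.NewBSONValueWriter(buf)
	if err != nil {
		panic(err)
	}
	enc, err := bson.NewEncoder(vw)
	if err != nil {
		panic(err)
	}
	enc.UseJSONStructTags() // replacement for JSONFallbackStructTagParser

	if err := enc.Encode(event{DisplayName: "deploy"}); err != nil {
		panic(err)
	}
	fmt.Println(bson.Raw(buf.Bytes())) // {"display_name": "deploy"}
}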
var JSONFallbackStructTagParser StructTagParserFunc = func(sf reflect.StructField) (StructTags, error) { key := strings.ToLower(sf.Name) tag, ok := sf.Tag.Lookup("bson") diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go index ec7e30f724..7b005a9958 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go @@ -22,18 +22,28 @@ const ( ) // TimeCodec is the Codec used for time.Time values. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// TimeCodec registered. type TimeCodec struct { + // UseLocalTimeZone specifies if we should decode into the local time zone. Defaults to false. + // + // Deprecated: Use bson.Decoder.UseLocalTimeZone instead. UseLocalTimeZone bool } var ( defaultTimeCodec = NewTimeCodec() - _ ValueCodec = defaultTimeCodec + // Assert that defaultTimeCodec satisfies the typeDecoder interface, which allows it to be used + // by collection type decoders (e.g. map, slice, etc) to set individual values in a collection. _ typeDecoder = defaultTimeCodec ) // NewTimeCodec returns a TimeCodec with options opts. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// TimeCodec registered. func NewTimeCodec(opts ...*bsonoptions.TimeCodecOptions) *TimeCodec { timeOpt := bsonoptions.MergeTimeCodecOptions(opts...) @@ -95,7 +105,7 @@ func (tc *TimeCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t refle return emptyValue, fmt.Errorf("cannot decode %v into a time.Time", vrType) } - if !tc.UseLocalTimeZone { + if !tc.UseLocalTimeZone && !dc.useLocalTimeZone { timeVal = timeVal.UTC() } return reflect.ValueOf(timeVal), nil @@ -117,7 +127,7 @@ func (tc *TimeCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val re } // EncodeValue is the ValueEncoderFunc for time.TIme. -func (tc *TimeCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +func (tc *TimeCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tTime { return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val} } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go index 0b21ce999c..7eb1069050 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go @@ -17,18 +17,29 @@ import ( ) // UIntCodec is the Codec used for uint values. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// UIntCodec registered. type UIntCodec struct { + // EncodeToMinSize causes EncodeValue to marshal Go uint values (excluding uint64) as the + // minimum BSON int size (either 32-bit or 64-bit) that can represent the integer value. + // + // Deprecated: Use bson.Encoder.IntMinSize instead. EncodeToMinSize bool } var ( defaultUIntCodec = NewUIntCodec() - _ ValueCodec = defaultUIntCodec + // Assert that defaultUIntCodec satisfies the typeDecoder interface, which allows it to be used + // by collection type decoders (e.g. map, slice, etc) to set individual values in a collection. _ typeDecoder = defaultUIntCodec ) // NewUIntCodec returns a UIntCodec with options opts. 
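Editor's note: TimeCodec.UseLocalTimeZone and UIntCodec.EncodeToMinSize move to Decoder.UseLocalTimeZone and Encoder.IntMinSize respectively, per the deprecation notes. A combined sketch, again assuming the bsonrw constructors from this driver version:

package main

import (
	"bytes"
	"fmt"
	"time"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

func main() {
	buf := new(bytes.Buffer)
	vw, err := bsonrw.NewBSONValueWriter(buf)
	if err != nil {
		panic(err)
	}
	enc, err := bson.NewEncoder(vw)
	if err != nil {
		panic(err)
	}
	// Was UIntCodec{EncodeToMinSize: true}: write the smallest BSON int size
	// (32- or 64-bit) that can represent the value.
	enc.IntMinSize()
	if err := enc.Encode(bson.D{
		{Key: "count", Value: uint32(7)},
		{Key: "when", Value: time.Now()},
	}); err != nil {
		panic(err)
	}

	// Was TimeCodec{UseLocalTimeZone: true}: keep decoded datetimes in
	// time.Local instead of converting to UTC.
	dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(buf.Bytes()))
	if err != nil {
		panic(err)
	}
	dec.UseLocalTimeZone()

	var out struct {
		Count uint32    `bson:"count"`
		When  time.Time `bson:"when"`
	}
	if err := dec.Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out.Count, out.When.Location()) // 7 Local
}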
+// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// UIntCodec registered. func NewUIntCodec(opts ...*bsonoptions.UIntCodecOptions) *UIntCodec { uintOpt := bsonoptions.MergeUIntCodecOptions(opts...) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go index b1256a4dca..996bd17127 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go @@ -7,22 +7,33 @@ package bsonoptions // ByteSliceCodecOptions represents all possible options for byte slice encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. type ByteSliceCodecOptions struct { EncodeNilAsEmpty *bool // Specifies if a nil byte slice should encode as an empty binary instead of null. Defaults to false. } // ByteSliceCodec creates a new *ByteSliceCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. func ByteSliceCodec() *ByteSliceCodecOptions { return &ByteSliceCodecOptions{} } // SetEncodeNilAsEmpty specifies if a nil byte slice should encode as an empty binary instead of null. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilByteSliceAsEmpty] instead. func (bs *ByteSliceCodecOptions) SetEncodeNilAsEmpty(b bool) *ByteSliceCodecOptions { bs.EncodeNilAsEmpty = &b return bs } // MergeByteSliceCodecOptions combines the given *ByteSliceCodecOptions into a single *ByteSliceCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeByteSliceCodecOptions(opts ...*ByteSliceCodecOptions) *ByteSliceCodecOptions { bs := ByteSliceCodec() for _, opt := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go index 6caaa000e6..f522c7e03f 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go @@ -7,22 +7,33 @@ package bsonoptions // EmptyInterfaceCodecOptions represents all possible options for interface{} encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. type EmptyInterfaceCodecOptions struct { DecodeBinaryAsSlice *bool // Specifies if Old and Generic type binarys should default to []slice instead of primitive.Binary. Defaults to false. } // EmptyInterfaceCodec creates a new *EmptyInterfaceCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. func EmptyInterfaceCodec() *EmptyInterfaceCodecOptions { return &EmptyInterfaceCodecOptions{} } // SetDecodeBinaryAsSlice specifies if Old and Generic type binarys should default to []slice instead of primitive.Binary. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.BinaryAsSlice] instead. 
func (e *EmptyInterfaceCodecOptions) SetDecodeBinaryAsSlice(b bool) *EmptyInterfaceCodecOptions { e.DecodeBinaryAsSlice = &b return e } // MergeEmptyInterfaceCodecOptions combines the given *EmptyInterfaceCodecOptions into a single *EmptyInterfaceCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeEmptyInterfaceCodecOptions(opts ...*EmptyInterfaceCodecOptions) *EmptyInterfaceCodecOptions { e := EmptyInterfaceCodec() for _, opt := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go index 7a6a880b88..a7a7c1d980 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go @@ -7,6 +7,9 @@ package bsonoptions // MapCodecOptions represents all possible options for map encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. type MapCodecOptions struct { DecodeZerosMap *bool // Specifies if the map should be zeroed before decoding into it. Defaults to false. EncodeNilAsEmpty *bool // Specifies if a nil map should encode as an empty document instead of null. Defaults to false. @@ -19,17 +22,24 @@ type MapCodecOptions struct { } // MapCodec creates a new *MapCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. func MapCodec() *MapCodecOptions { return &MapCodecOptions{} } // SetDecodeZerosMap specifies if the map should be zeroed before decoding into it. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroMaps] instead. func (t *MapCodecOptions) SetDecodeZerosMap(b bool) *MapCodecOptions { t.DecodeZerosMap = &b return t } // SetEncodeNilAsEmpty specifies if a nil map should encode as an empty document instead of null. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilMapAsEmpty] instead. func (t *MapCodecOptions) SetEncodeNilAsEmpty(b bool) *MapCodecOptions { t.EncodeNilAsEmpty = &b return t @@ -40,12 +50,17 @@ func (t *MapCodecOptions) SetEncodeNilAsEmpty(b bool) *MapCodecOptions { // type must either be a string, an integer type, or implement bsoncodec.KeyUnmarshaler. If true, keys are encoded with // fmt.Sprint() and the encoding key type must be a string, an integer type, or a float. If true, the use of Stringer // will override TextMarshaler/TextUnmarshaler. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.StringifyMapKeysWithFmt] instead. func (t *MapCodecOptions) SetEncodeKeysWithStringer(b bool) *MapCodecOptions { t.EncodeKeysWithStringer = &b return t } // MergeMapCodecOptions combines the given *MapCodecOptions into a single *MapCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. 
func MergeMapCodecOptions(opts ...*MapCodecOptions) *MapCodecOptions { s := MapCodec() for _, opt := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go index ef965e4b41..3c1e4f35ba 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go @@ -7,22 +7,33 @@ package bsonoptions // SliceCodecOptions represents all possible options for slice encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. type SliceCodecOptions struct { EncodeNilAsEmpty *bool // Specifies if a nil slice should encode as an empty array instead of null. Defaults to false. } // SliceCodec creates a new *SliceCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. func SliceCodec() *SliceCodecOptions { return &SliceCodecOptions{} } // SetEncodeNilAsEmpty specifies if a nil slice should encode as an empty array instead of null. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilSliceAsEmpty] instead. func (s *SliceCodecOptions) SetEncodeNilAsEmpty(b bool) *SliceCodecOptions { s.EncodeNilAsEmpty = &b return s } // MergeSliceCodecOptions combines the given *SliceCodecOptions into a single *SliceCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeSliceCodecOptions(opts ...*SliceCodecOptions) *SliceCodecOptions { s := SliceCodec() for _, opt := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go index 65964f4207..f8b76f996e 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go @@ -9,23 +9,34 @@ package bsonoptions var defaultDecodeOIDAsHex = true // StringCodecOptions represents all possible options for string encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. type StringCodecOptions struct { DecodeObjectIDAsHex *bool // Specifies if we should decode ObjectID as the hex value. Defaults to true. } // StringCodec creates a new *StringCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. func StringCodec() *StringCodecOptions { return &StringCodecOptions{} } // SetDecodeObjectIDAsHex specifies if object IDs should be decoded as their hex representation. If false, a string made // from the raw object ID bytes will be used. Defaults to true. +// +// Deprecated: Decoding object IDs as raw bytes will not be supported in Go Driver 2.0. func (t *StringCodecOptions) SetDecodeObjectIDAsHex(b bool) *StringCodecOptions { t.DecodeObjectIDAsHex = &b return t } // MergeStringCodecOptions combines the given *StringCodecOptions into a single *StringCodecOptions in a last one wins fashion. 
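Editor's note: the map codec options follow the same pattern: SetEncodeNilAsEmpty, SetEncodeKeysWithStringer and SetDecodeZerosMap map onto Encoder.NilMapAsEmpty, Encoder.StringifyMapKeysWithFmt and Decoder.ZeroMaps. A sketch of that migration (the doc type is illustrative; the bsonrw constructors are assumed as before):

package main

import (
	"bytes"
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

func main() {
	buf := new(bytes.Buffer)
	vw, err := bsonrw.NewBSONValueWriter(buf)
	if err != nil {
		panic(err)
	}
	enc, err := bson.NewEncoder(vw)
	if err != nil {
		panic(err)
	}
	enc.NilMapAsEmpty()           // was MapCodecOptions.SetEncodeNilAsEmpty
	enc.StringifyMapKeysWithFmt() // was MapCodecOptions.SetEncodeKeysWithStringer

	type doc struct {
		Labels map[string]string `bson:"labels"`
	}
	if err := enc.Encode(doc{Labels: nil}); err != nil {
		panic(err)
	}
	fmt.Println(bson.Raw(buf.Bytes())) // {"labels": {}}

	dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(buf.Bytes()))
	if err != nil {
		panic(err)
	}
	dec.ZeroMaps() // was MapCodecOptions.SetDecodeZerosMap
	var out doc
	if err := dec.Decode(&out); err != nil {
		panic(err)
	}
}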
+// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeStringCodecOptions(opts ...*StringCodecOptions) *StringCodecOptions { s := &StringCodecOptions{&defaultDecodeOIDAsHex} for _, opt := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go index 78d1dd8668..1cbfa32e8b 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go @@ -9,6 +9,9 @@ package bsonoptions var defaultOverwriteDuplicatedInlinedFields = true // StructCodecOptions represents all possible options for struct encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. type StructCodecOptions struct { DecodeZeroStruct *bool // Specifies if structs should be zeroed before decoding into them. Defaults to false. DecodeDeepZeroInline *bool // Specifies if structs should be recursively zeroed when a inline value is decoded. Defaults to false. @@ -18,17 +21,24 @@ type StructCodecOptions struct { } // StructCodec creates a new *StructCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. func StructCodec() *StructCodecOptions { return &StructCodecOptions{} } // SetDecodeZeroStruct specifies if structs should be zeroed before decoding into them. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroStructs] instead. func (t *StructCodecOptions) SetDecodeZeroStruct(b bool) *StructCodecOptions { t.DecodeZeroStruct = &b return t } // SetDecodeDeepZeroInline specifies if structs should be zeroed before decoding into them. Defaults to false. +// +// Deprecated: DecodeDeepZeroInline will not be supported in Go Driver 2.0. func (t *StructCodecOptions) SetDecodeDeepZeroInline(b bool) *StructCodecOptions { t.DecodeDeepZeroInline = &b return t @@ -36,6 +46,8 @@ func (t *StructCodecOptions) SetDecodeDeepZeroInline(b bool) *StructCodecOptions // SetEncodeOmitDefaultStruct specifies if default structs should be considered empty by omitempty. A default struct has all // its values set to their default value. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.OmitZeroStruct] instead. func (t *StructCodecOptions) SetEncodeOmitDefaultStruct(b bool) *StructCodecOptions { t.EncodeOmitDefaultStruct = &b return t @@ -45,18 +57,26 @@ func (t *StructCodecOptions) SetEncodeOmitDefaultStruct(b bool) *StructCodecOpti // same bson key. When true and decoding, values will be written to the outermost struct with a matching key, and when // encoding, keys will have the value of the top-most matching field. When false, decoding and encoding will error if // there are duplicate keys after the struct is inlined. Defaults to true. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.ErrorOnInlineDuplicates] instead. func (t *StructCodecOptions) SetOverwriteDuplicatedInlinedFields(b bool) *StructCodecOptions { t.OverwriteDuplicatedInlinedFields = &b return t } // SetAllowUnexportedFields specifies if unexported fields should be marshaled/unmarshaled. Defaults to false. 
+// +// Deprecated: AllowUnexportedFields does not work on recent versions of Go and will not be +// supported in Go Driver 2.0. func (t *StructCodecOptions) SetAllowUnexportedFields(b bool) *StructCodecOptions { t.AllowUnexportedFields = &b return t } // MergeStructCodecOptions combines the given *StructCodecOptions into a single *StructCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeStructCodecOptions(opts ...*StructCodecOptions) *StructCodecOptions { s := &StructCodecOptions{ OverwriteDuplicatedInlinedFields: &defaultOverwriteDuplicatedInlinedFields, diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go index 13496d1217..3f38433d22 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go @@ -7,22 +7,33 @@ package bsonoptions // TimeCodecOptions represents all possible options for time.Time encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. type TimeCodecOptions struct { UseLocalTimeZone *bool // Specifies if we should decode into the local time zone. Defaults to false. } // TimeCodec creates a new *TimeCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. func TimeCodec() *TimeCodecOptions { return &TimeCodecOptions{} } // SetUseLocalTimeZone specifies if we should decode into the local time zone. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseLocalTimeZone] instead. func (t *TimeCodecOptions) SetUseLocalTimeZone(b bool) *TimeCodecOptions { t.UseLocalTimeZone = &b return t } // MergeTimeCodecOptions combines the given *TimeCodecOptions into a single *TimeCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeTimeCodecOptions(opts ...*TimeCodecOptions) *TimeCodecOptions { t := TimeCodec() for _, opt := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go index e08b7f192e..5091e4d963 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go @@ -7,22 +7,33 @@ package bsonoptions // UIntCodecOptions represents all possible options for uint encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. type UIntCodecOptions struct { EncodeToMinSize *bool // Specifies if all uints except uint64 should be decoded to minimum size bsontype. Defaults to false. } // UIntCodec creates a new *UIntCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. 
func UIntCodec() *UIntCodecOptions { return &UIntCodecOptions{} } // SetEncodeToMinSize specifies if all uints except uint64 should be decoded to minimum size bsontype. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.IntMinSize] instead. func (u *UIntCodecOptions) SetEncodeToMinSize(b bool) *UIntCodecOptions { u.EncodeToMinSize = &b return u } // MergeUIntCodecOptions combines the given *UIntCodecOptions into a single *UIntCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeUIntCodecOptions(opts ...*UIntCodecOptions) *UIntCodecOptions { u := UIntCodec() for _, opt := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go index 5cdf6460bc..33d59bd258 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go @@ -17,20 +17,32 @@ import ( // Copier is a type that allows copying between ValueReaders, ValueWriters, and // []byte values. +// +// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. type Copier struct{} // NewCopier creates a new copier with the given registry. If a nil registry is provided // a default registry is used. +// +// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. func NewCopier() Copier { return Copier{} } // CopyDocument handles copying a document from src to dst. +// +// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. func CopyDocument(dst ValueWriter, src ValueReader) error { return Copier{}.CopyDocument(dst, src) } // CopyDocument handles copying one document from the src to the dst. +// +// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. func (c Copier) CopyDocument(dst ValueWriter, src ValueReader) error { dr, err := src.ReadDocument() if err != nil { @@ -47,6 +59,9 @@ func (c Copier) CopyDocument(dst ValueWriter, src ValueReader) error { // CopyArrayFromBytes copies the values from a BSON array represented as a // []byte to a ValueWriter. +// +// Deprecated: Copying BSON arrays using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. func (c Copier) CopyArrayFromBytes(dst ValueWriter, src []byte) error { aw, err := dst.WriteArray() if err != nil { @@ -63,6 +78,9 @@ func (c Copier) CopyArrayFromBytes(dst ValueWriter, src []byte) error { // CopyDocumentFromBytes copies the values from a BSON document represented as a // []byte to a ValueWriter. +// +// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. func (c Copier) CopyDocumentFromBytes(dst ValueWriter, src []byte) error { dw, err := dst.WriteDocument() if err != nil { @@ -81,6 +99,9 @@ type writeElementFn func(key string) (ValueWriter, error) // CopyBytesToArrayWriter copies the values from a BSON Array represented as a []byte to an // ArrayWriter. +// +// Deprecated: Copying BSON arrays using the ArrayWriter interface will not be supported in Go +// Driver 2.0. 
func (c Copier) CopyBytesToArrayWriter(dst ArrayWriter, src []byte) error { wef := func(_ string) (ValueWriter, error) { return dst.WriteArrayElement() @@ -91,6 +112,9 @@ func (c Copier) CopyBytesToArrayWriter(dst ArrayWriter, src []byte) error { // CopyBytesToDocumentWriter copies the values from a BSON document represented as a []byte to a // DocumentWriter. +// +// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. func (c Copier) CopyBytesToDocumentWriter(dst DocumentWriter, src []byte) error { wef := func(key string) (ValueWriter, error) { return dst.WriteDocumentElement(key) @@ -150,12 +174,18 @@ func (c Copier) copyBytesToValueWriter(src []byte, wef writeElementFn) error { // CopyDocumentToBytes copies an entire document from the ValueReader and // returns it as bytes. +// +// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. func (c Copier) CopyDocumentToBytes(src ValueReader) ([]byte, error) { return c.AppendDocumentBytes(nil, src) } // AppendDocumentBytes functions the same as CopyDocumentToBytes, but will // append the result to dst. +// +// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. func (c Copier) AppendDocumentBytes(dst []byte, src ValueReader) ([]byte, error) { if br, ok := src.(BytesReader); ok { _, dst, err := br.ReadValueBytes(dst) @@ -173,6 +203,9 @@ func (c Copier) AppendDocumentBytes(dst []byte, src ValueReader) ([]byte, error) } // AppendArrayBytes copies an array from the ValueReader to dst. +// +// Deprecated: Copying BSON arrays using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. func (c Copier) AppendArrayBytes(dst []byte, src ValueReader) ([]byte, error) { if br, ok := src.(BytesReader); ok { _, dst, err := br.ReadValueBytes(dst) @@ -190,6 +223,8 @@ func (c Copier) AppendArrayBytes(dst []byte, src ValueReader) ([]byte, error) { } // CopyValueFromBytes will write the value represtend by t and src to dst. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.UnmarshalValue] instead. func (c Copier) CopyValueFromBytes(dst ValueWriter, t bsontype.Type, src []byte) error { if wvb, ok := dst.(BytesWriter); ok { return wvb.WriteValueBytes(t, src) @@ -206,12 +241,17 @@ func (c Copier) CopyValueFromBytes(dst ValueWriter, t bsontype.Type, src []byte) // CopyValueToBytes copies a value from src and returns it as a bsontype.Type and a // []byte. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.MarshalValue] instead. func (c Copier) CopyValueToBytes(src ValueReader) (bsontype.Type, []byte, error) { return c.AppendValueBytes(nil, src) } // AppendValueBytes functions the same as CopyValueToBytes, but will append the // result to dst. +// +// Deprecated: Appending individual BSON elements to an existing slice will not be supported in Go +// Driver 2.0. func (c Copier) AppendValueBytes(dst []byte, src ValueReader) (bsontype.Type, []byte, error) { if br, ok := src.(BytesReader); ok { return br.ReadValueBytes(dst) @@ -234,6 +274,9 @@ func (c Copier) AppendValueBytes(dst []byte, src ValueReader) (bsontype.Type, [] } // CopyValue will copy a single value from src to dst. +// +// Deprecated: Copying BSON values using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. 
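The copier.go deprecations above point Copier.CopyValueToBytes and Copier.CopyValueFromBytes users at bson.MarshalValue and the bson.UnmarshalValue function introduced later in this diff. A rough round-trip sketch (the string value is arbitrary):

	package main

	import (
		"fmt"

		"go.mongodb.org/mongo-driver/bson"
	)

	func main() {
		// MarshalValue replaces Copier.CopyValueToBytes: it returns the BSON type
		// and the raw bytes of a single encoded value.
		t, raw, err := bson.MarshalValue("hello")
		if err != nil {
			panic(err)
		}
		fmt.Println(t) // string

		// UnmarshalValue replaces Copier.CopyValueFromBytes for the reverse direction.
		var s string
		if err := bson.UnmarshalValue(t, raw, &s); err != nil {
			panic(err)
		}
		fmt.Println(s) // hello
	}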
func (c Copier) CopyValue(dst ValueWriter, src ValueReader) error { var err error switch src.Type() { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go index 35832d73aa..2aca37a91f 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go @@ -16,11 +16,15 @@ import ( ) // ExtJSONValueReaderPool is a pool for ValueReaders that read ExtJSON. +// +// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0. type ExtJSONValueReaderPool struct { pool sync.Pool } // NewExtJSONValueReaderPool instantiates a new ExtJSONValueReaderPool. +// +// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0. func NewExtJSONValueReaderPool() *ExtJSONValueReaderPool { return &ExtJSONValueReaderPool{ pool: sync.Pool{ @@ -32,6 +36,8 @@ func NewExtJSONValueReaderPool() *ExtJSONValueReaderPool { } // Get retrieves a ValueReader from the pool and uses src as the underlying ExtJSON. +// +// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0. func (bvrp *ExtJSONValueReaderPool) Get(r io.Reader, canonical bool) (ValueReader, error) { vr := bvrp.pool.Get().(*extJSONValueReader) return vr.reset(r, canonical) @@ -39,6 +45,8 @@ func (bvrp *ExtJSONValueReaderPool) Get(r io.Reader, canonical bool) (ValueReade // Put inserts a ValueReader into the pool. If the ValueReader is not a ExtJSON ValueReader nothing // is inserted into the pool and ok will be false. +// +// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0. func (bvrp *ExtJSONValueReaderPool) Put(vr ValueReader) (ok bool) { bvr, ok := vr.(*extJSONValueReader) if !ok { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_writer.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_writer.go index 99ed524b77..bb9303167c 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_writer.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_writer.go @@ -23,11 +23,15 @@ import ( ) // ExtJSONValueWriterPool is a pool for ExtJSON ValueWriters. +// +// Deprecated: ExtJSONValueWriterPool will not be supported in Go Driver 2.0. type ExtJSONValueWriterPool struct { pool sync.Pool } // NewExtJSONValueWriterPool creates a new pool for ValueWriter instances that write to ExtJSON. +// +// Deprecated: ExtJSONValueWriterPool will not be supported in Go Driver 2.0. func NewExtJSONValueWriterPool() *ExtJSONValueWriterPool { return &ExtJSONValueWriterPool{ pool: sync.Pool{ @@ -39,6 +43,8 @@ func NewExtJSONValueWriterPool() *ExtJSONValueWriterPool { } // Get retrieves a ExtJSON ValueWriter from the pool and resets it to use w as the destination. +// +// Deprecated: ExtJSONValueWriterPool will not be supported in Go Driver 2.0. func (bvwp *ExtJSONValueWriterPool) Get(w io.Writer, canonical, escapeHTML bool) ValueWriter { vw := bvwp.pool.Get().(*extJSONValueWriter) if writer, ok := w.(*SliceWriter); ok { @@ -53,6 +59,8 @@ func (bvwp *ExtJSONValueWriterPool) Get(w io.Writer, canonical, escapeHTML bool) // Put inserts a ValueWriter into the pool. If the ValueWriter is not a ExtJSON ValueWriter, nothing // happens and ok will be false. +// +// Deprecated: ExtJSONValueWriterPool will not be supported in Go Driver 2.0. 
func (bvwp *ExtJSONValueWriterPool) Put(vw ValueWriter) (ok bool) { bvw, ok := vw.(*extJSONValueWriter) if !ok { @@ -80,6 +88,7 @@ type extJSONValueWriter struct { frame int64 canonical bool escapeHTML bool + newlines bool } // NewExtJSONValueWriter creates a ValueWriter that writes Extended JSON to w. @@ -88,10 +97,13 @@ func NewExtJSONValueWriter(w io.Writer, canonical, escapeHTML bool) (ValueWriter return nil, errNilWriter } - return newExtJSONWriter(w, canonical, escapeHTML), nil + // Enable newlines for all Extended JSON value writers created by NewExtJSONValueWriter. We + // expect these value writers to be used with an Encoder, which should add newlines after + // encoded Extended JSON documents. + return newExtJSONWriter(w, canonical, escapeHTML, true), nil } -func newExtJSONWriter(w io.Writer, canonical, escapeHTML bool) *extJSONValueWriter { +func newExtJSONWriter(w io.Writer, canonical, escapeHTML, newlines bool) *extJSONValueWriter { stack := make([]ejvwState, 1, 5) stack[0] = ejvwState{mode: mTopLevel} @@ -101,6 +113,7 @@ func newExtJSONWriter(w io.Writer, canonical, escapeHTML bool) *extJSONValueWrit stack: stack, canonical: canonical, escapeHTML: escapeHTML, + newlines: newlines, } } @@ -564,6 +577,12 @@ func (ejvw *extJSONValueWriter) WriteDocumentEnd() error { case mDocument: ejvw.buf = append(ejvw.buf, ',') case mTopLevel: + // If the value writer has newlines enabled, end top-level documents with a newline so that + // multiple documents encoded to the same writer are separated by newlines. That matches the + // Go json.Encoder behavior and also works with bsonrw.NewExtJSONValueReader. + if ejvw.newlines { + ejvw.buf = append(ejvw.buf, '\n') + } if ejvw.w != nil { if _, err := ejvw.w.Write(ejvw.buf); err != nil { return err diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/reader.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/reader.go index 0b8fa28d57..324b10b616 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/reader.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/reader.go @@ -58,6 +58,8 @@ type ValueReader interface { // types that implement ValueReader may also implement this interface. // // The bytes of the value will be appended to dst. +// +// Deprecated: BytesReader will not be supported in Go Driver 2.0. type BytesReader interface { ReadValueBytes(dst []byte) (bsontype.Type, []byte, error) } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go index ef5d837c2f..9bf24fae0b 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go @@ -28,11 +28,15 @@ var vrPool = sync.Pool{ } // BSONValueReaderPool is a pool for ValueReaders that read BSON. +// +// Deprecated: BSONValueReaderPool will not be supported in Go Driver 2.0. type BSONValueReaderPool struct { pool sync.Pool } // NewBSONValueReaderPool instantiates a new BSONValueReaderPool. +// +// Deprecated: BSONValueReaderPool will not be supported in Go Driver 2.0. func NewBSONValueReaderPool() *BSONValueReaderPool { return &BSONValueReaderPool{ pool: sync.Pool{ @@ -44,6 +48,8 @@ func NewBSONValueReaderPool() *BSONValueReaderPool { } // Get retrieves a ValueReader from the pool and uses src as the underlying BSON. +// +// Deprecated: BSONValueReaderPool will not be supported in Go Driver 2.0. 
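The extjson_writer.go change above makes NewExtJSONValueWriter newline-terminate top-level documents, so an Encoder can stream several Extended JSON documents to one writer much like encoding/json does. A small sketch of that usage (the document contents are made up):

	package main

	import (
		"bytes"
		"fmt"

		"go.mongodb.org/mongo-driver/bson"
		"go.mongodb.org/mongo-driver/bson/bsonrw"
	)

	func main() {
		buf := new(bytes.Buffer)

		// Canonical Extended JSON, no HTML escaping; newline separation is now
		// enabled by NewExtJSONValueWriter itself.
		vw, err := bsonrw.NewExtJSONValueWriter(buf, true, false)
		if err != nil {
			panic(err)
		}
		enc, err := bson.NewEncoder(vw)
		if err != nil {
			panic(err)
		}

		for _, doc := range []bson.M{{"seq": 1}, {"seq": 2}} {
			if err := enc.Encode(doc); err != nil {
				panic(err)
			}
		}
		fmt.Print(buf.String()) // one Extended JSON document per line
	}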
func (bvrp *BSONValueReaderPool) Get(src []byte) ValueReader { vr := bvrp.pool.Get().(*valueReader) vr.reset(src) @@ -52,6 +58,8 @@ func (bvrp *BSONValueReaderPool) Get(src []byte) ValueReader { // Put inserts a ValueReader into the pool. If the ValueReader is not a BSON ValueReader nothing // is inserted into the pool and ok will be false. +// +// Deprecated: BSONValueReaderPool will not be supported in Go Driver 2.0. func (bvrp *BSONValueReaderPool) Put(vr ValueReader) (ok bool) { bvr, ok := vr.(*valueReader) if !ok { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go index f95a08afd5..a6dd8d34f5 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go @@ -29,11 +29,15 @@ var vwPool = sync.Pool{ } // BSONValueWriterPool is a pool for BSON ValueWriters. +// +// Deprecated: BSONValueWriterPool will not be supported in Go Driver 2.0. type BSONValueWriterPool struct { pool sync.Pool } // NewBSONValueWriterPool creates a new pool for ValueWriter instances that write to BSON. +// +// Deprecated: BSONValueWriterPool will not be supported in Go Driver 2.0. func NewBSONValueWriterPool() *BSONValueWriterPool { return &BSONValueWriterPool{ pool: sync.Pool{ @@ -45,6 +49,8 @@ func NewBSONValueWriterPool() *BSONValueWriterPool { } // Get retrieves a BSON ValueWriter from the pool and resets it to use w as the destination. +// +// Deprecated: BSONValueWriterPool will not be supported in Go Driver 2.0. func (bvwp *BSONValueWriterPool) Get(w io.Writer) ValueWriter { vw := bvwp.pool.Get().(*valueWriter) @@ -56,6 +62,8 @@ func (bvwp *BSONValueWriterPool) Get(w io.Writer) ValueWriter { } // GetAtModeElement retrieves a ValueWriterFlusher from the pool and resets it to use w as the destination. +// +// Deprecated: BSONValueWriterPool will not be supported in Go Driver 2.0. func (bvwp *BSONValueWriterPool) GetAtModeElement(w io.Writer) ValueWriterFlusher { vw := bvwp.Get(w).(*valueWriter) vw.push(mElement) @@ -64,6 +72,8 @@ func (bvwp *BSONValueWriterPool) GetAtModeElement(w io.Writer) ValueWriterFlushe // Put inserts a ValueWriter into the pool. If the ValueWriter is not a BSON ValueWriter, nothing // happens and ok will be false. +// +// Deprecated: BSONValueWriterPool will not be supported in Go Driver 2.0. func (bvwp *BSONValueWriterPool) Put(vw ValueWriter) (ok bool) { bvw, ok := vw.(*valueWriter) if !ok { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/writer.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/writer.go index dff65f87f7..628f452932 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/writer.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/writer.go @@ -56,6 +56,8 @@ type ValueWriter interface { } // ValueWriterFlusher is a superset of ValueWriter that exposes functionality to flush to the underlying buffer. +// +// Deprecated: ValueWriterFlusher will not be supported in Go Driver 2.0. type ValueWriterFlusher interface { ValueWriter Flush() error @@ -64,13 +66,20 @@ type ValueWriterFlusher interface { // BytesWriter is the interface used to write BSON bytes to a ValueWriter. // This interface is meant to be a superset of ValueWriter, so that types that // implement ValueWriter may also implement this interface. +// +// Deprecated: BytesWriter will not be supported in Go Driver 2.0. 
type BytesWriter interface { WriteValueBytes(t bsontype.Type, b []byte) error } // SliceWriter allows a pointer to a slice of bytes to be used as an io.Writer. +// +// Deprecated: SliceWriter will not be supported in Go Driver 2.0. type SliceWriter []byte +// Write writes the bytes to the underlying slice. +// +// Deprecated: SliceWriter will not be supported in Go Driver 2.0. func (sw *SliceWriter) Write(p []byte) (int, error) { written := len(p) *sw = append(*sw, p...) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go b/vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go index 7c91ae5186..f38c263a4c 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go @@ -8,7 +8,9 @@ // a stringifier for the Type to enable easier debugging when working with BSON. package bsontype // import "go.mongodb.org/mongo-driver/bson/bsontype" -// These constants uniquely refer to each BSON type. +// BSON element types as described in https://bsonspec.org/spec.html. +// +// Deprecated: Use bson.Type* constants instead. const ( Double Type = 0x01 String Type = 0x02 @@ -31,7 +33,12 @@ const ( Decimal128 Type = 0x13 MinKey Type = 0xFF MaxKey Type = 0x7F +) +// BSON binary element subtypes as described in https://bsonspec.org/spec.html. +// +// Deprecated: Use the bson.TypeBinary* constants instead. +const ( BinaryGeneric byte = 0x00 BinaryFunction byte = 0x01 BinaryBinaryOld byte = 0x02 diff --git a/vendor/go.mongodb.org/mongo-driver/bson/decoder.go b/vendor/go.mongodb.org/mongo-driver/bson/decoder.go index 6e189fa589..eac74cd399 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/decoder.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/decoder.go @@ -38,6 +38,12 @@ type Decoder struct { // (*Decoder).SetContext. defaultDocumentM bool defaultDocumentD bool + + binaryAsSlice bool + useJSONStructTags bool + useLocalTimeZone bool + zeroMaps bool + zeroStructs bool } // NewDecoder returns a new decoder that uses the DefaultRegistry to read from vr. @@ -53,6 +59,9 @@ func NewDecoder(vr bsonrw.ValueReader) (*Decoder, error) { } // NewDecoderWithContext returns a new decoder that uses DecodeContext dc to read from vr. +// +// Deprecated: Use [NewDecoder] and use the Decoder configuration methods set the desired unmarshal +// behavior instead. func NewDecoderWithContext(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader) (*Decoder, error) { if dc.Registry == nil { dc.Registry = DefaultRegistry @@ -70,8 +79,7 @@ func NewDecoderWithContext(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader) (* // Decode reads the next BSON document from the stream and decodes it into the // value pointed to by val. // -// The documentation for Unmarshal contains details about of BSON into a Go -// value. +// See [Unmarshal] for details about BSON unmarshaling behavior. func (d *Decoder) Decode(val interface{}) error { if unmarshaler, ok := val.(Unmarshaler); ok { // TODO(skriptble): Reuse a []byte here and use the AppendDocumentBytes method. 
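NewDecoderWithContext is deprecated above in favor of plain NewDecoder plus the configuration methods added in the next hunk (DefaultDocumentM, BinaryAsSlice, UseLocalTimeZone, and friends). A hedged sketch of that pattern; the document contents are invented:

	package main

	import (
		"fmt"

		"go.mongodb.org/mongo-driver/bson"
		"go.mongodb.org/mongo-driver/bson/bsonrw"
	)

	func main() {
		data, err := bson.Marshal(bson.M{"payload": bson.M{"n": int64(3)}})
		if err != nil {
			panic(err)
		}

		// Build a plain Decoder and toggle behavior with methods instead of
		// passing a bsoncodec.DecodeContext.
		dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(data))
		if err != nil {
			panic(err)
		}
		dec.DefaultDocumentM() // nested documents behind interface{} decode as bson.M
		dec.BinaryAsSlice()    // generic-subtype binary values decode as []byte

		var out map[string]interface{}
		if err := dec.Decode(&out); err != nil {
			panic(err)
		}
		fmt.Printf("%T\n", out["payload"]) // primitive.M
	}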
@@ -100,42 +108,101 @@ func (d *Decoder) Decode(val interface{}) error { if err != nil { return err } + if d.defaultDocumentM { d.dc.DefaultDocumentM() } if d.defaultDocumentD { d.dc.DefaultDocumentD() } + if d.binaryAsSlice { + d.dc.BinaryAsSlice() + } + if d.useJSONStructTags { + d.dc.UseJSONStructTags() + } + if d.useLocalTimeZone { + d.dc.UseLocalTimeZone() + } + if d.zeroMaps { + d.dc.ZeroMaps() + } + if d.zeroStructs { + d.dc.ZeroStructs() + } + return decoder.DecodeValue(d.dc, d.vr, rval) } // Reset will reset the state of the decoder, using the same *DecodeContext used in // the original construction but using vr for reading. func (d *Decoder) Reset(vr bsonrw.ValueReader) error { + // TODO:(GODRIVER-2719): Remove error return value. d.vr = vr return nil } // SetRegistry replaces the current registry of the decoder with r. func (d *Decoder) SetRegistry(r *bsoncodec.Registry) error { + // TODO:(GODRIVER-2719): Remove error return value. d.dc.Registry = r return nil } // SetContext replaces the current registry of the decoder with dc. +// +// Deprecated: Use the Decoder configuration methods to set the desired unmarshal behavior instead. func (d *Decoder) SetContext(dc bsoncodec.DecodeContext) error { + // TODO:(GODRIVER-2719): Remove error return value. d.dc = dc return nil } -// DefaultDocumentM will decode empty documents using the primitive.M type. This behavior is restricted to data typed as -// "interface{}" or "map[string]interface{}". +// DefaultDocumentM causes the Decoder to always unmarshal documents into the primitive.M type. This +// behavior is restricted to data typed as "interface{}" or "map[string]interface{}". func (d *Decoder) DefaultDocumentM() { d.defaultDocumentM = true } -// DefaultDocumentD will decode empty documents using the primitive.D type. This behavior is restricted to data typed as -// "interface{}" or "map[string]interface{}". +// DefaultDocumentD causes the Decoder to always unmarshal documents into the primitive.D type. This +// behavior is restricted to data typed as "interface{}" or "map[string]interface{}". func (d *Decoder) DefaultDocumentD() { d.defaultDocumentD = true } + +// AllowTruncatingDoubles causes the Decoder to truncate the fractional part of BSON "double" values +// when attempting to unmarshal them into a Go integer (int, int8, int16, int32, or int64) struct +// field. The truncation logic does not apply to BSON "decimal128" values. +func (d *Decoder) AllowTruncatingDoubles() { + d.dc.Truncate = true +} + +// BinaryAsSlice causes the Decoder to unmarshal BSON binary field values that are the "Generic" or +// "Old" BSON binary subtype as a Go byte slice instead of a primitive.Binary. +func (d *Decoder) BinaryAsSlice() { + d.binaryAsSlice = true +} + +// UseJSONStructTags causes the Decoder to fall back to using the "json" struct tag if a "bson" +// struct tag is not specified. +func (d *Decoder) UseJSONStructTags() { + d.useJSONStructTags = true +} + +// UseLocalTimeZone causes the Decoder to unmarshal time.Time values in the local timezone instead +// of the UTC timezone. +func (d *Decoder) UseLocalTimeZone() { + d.useLocalTimeZone = true +} + +// ZeroMaps causes the Decoder to delete any existing values from Go maps in the destination value +// passed to Decode before unmarshaling BSON documents into them. +func (d *Decoder) ZeroMaps() { + d.zeroMaps = true +} + +// ZeroStructs causes the Decoder to delete any existing values from Go structs in the destination +// value passed to Decode before unmarshaling BSON documents into them. 
+func (d *Decoder) ZeroStructs() { + d.zeroStructs = true +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/doc.go index 0134006d8e..048b5eb998 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/doc.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/doc.go @@ -7,7 +7,8 @@ // Package bson is a library for reading, writing, and manipulating BSON. BSON is a binary serialization format used to // store documents and make remote procedure calls in MongoDB. The BSON specification is located at https://bsonspec.org. // The BSON library handles marshalling and unmarshalling of values through a configurable codec system. For a description -// of the codec system and examples of registering custom codecs, see the bsoncodec package. +// of the codec system and examples of registering custom codecs, see the bsoncodec package. For additional information and +// usage examples, check out the [Work with BSON] page in the Go Driver docs site. // // # Raw BSON // @@ -138,4 +139,6 @@ // # Marshalling and Unmarshalling // // Manually marshalling and unmarshalling can be done with the Marshal and Unmarshal family of functions. +// +// [Work with BSON]: https://www.mongodb.com/docs/drivers/go/current/fundamentals/bson/ package bson diff --git a/vendor/go.mongodb.org/mongo-driver/bson/encoder.go b/vendor/go.mongodb.org/mongo-driver/bson/encoder.go index fe5125d086..0be2a97fbc 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/encoder.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/encoder.go @@ -29,10 +29,20 @@ var encPool = sync.Pool{ type Encoder struct { ec bsoncodec.EncodeContext vw bsonrw.ValueWriter + + errorOnInlineDuplicates bool + intMinSize bool + stringifyMapKeysWithFmt bool + nilMapAsEmpty bool + nilSliceAsEmpty bool + nilByteSliceAsEmpty bool + omitZeroStruct bool + useJSONStructTags bool } // NewEncoder returns a new encoder that uses the DefaultRegistry to write to vw. func NewEncoder(vw bsonrw.ValueWriter) (*Encoder, error) { + // TODO:(GODRIVER-2719): Remove error return value. if vw == nil { return nil, errors.New("cannot create a new Encoder with a nil ValueWriter") } @@ -44,6 +54,9 @@ func NewEncoder(vw bsonrw.ValueWriter) (*Encoder, error) { } // NewEncoderWithContext returns a new encoder that uses EncodeContext ec to write to vw. +// +// Deprecated: Use [NewEncoder] and use the Encoder configuration methods to set the desired marshal +// behavior instead. func NewEncoderWithContext(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter) (*Encoder, error) { if ec.Registry == nil { ec = bsoncodec.EncodeContext{Registry: DefaultRegistry} @@ -60,8 +73,7 @@ func NewEncoderWithContext(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter) (* // Encode writes the BSON encoding of val to the stream. // -// The documentation for Marshal contains details about the conversion of Go -// values to BSON. +// See [Marshal] for details about BSON marshaling behavior. func (e *Encoder) Encode(val interface{}) error { if marshaler, ok := val.(Marshaler); ok { // TODO(skriptble): Should we have a MarshalAppender interface so that we can have []byte reuse? @@ -76,24 +88,112 @@ func (e *Encoder) Encode(val interface{}) error { if err != nil { return err } + + // Copy the configurations applied to the Encoder over to the EncodeContext, which actually + // communicates those configurations to the default ValueEncoders. 
+ if e.errorOnInlineDuplicates { + e.ec.ErrorOnInlineDuplicates() + } + if e.intMinSize { + e.ec.MinSize = true + } + if e.stringifyMapKeysWithFmt { + e.ec.StringifyMapKeysWithFmt() + } + if e.nilMapAsEmpty { + e.ec.NilMapAsEmpty() + } + if e.nilSliceAsEmpty { + e.ec.NilSliceAsEmpty() + } + if e.nilByteSliceAsEmpty { + e.ec.NilByteSliceAsEmpty() + } + if e.omitZeroStruct { + e.ec.OmitZeroStruct() + } + if e.useJSONStructTags { + e.ec.UseJSONStructTags() + } + return encoder.EncodeValue(e.ec, e.vw, reflect.ValueOf(val)) } -// Reset will reset the state of the encoder, using the same *EncodeContext used in +// Reset will reset the state of the Encoder, using the same *EncodeContext used in // the original construction but using vw. func (e *Encoder) Reset(vw bsonrw.ValueWriter) error { + // TODO:(GODRIVER-2719): Remove error return value. e.vw = vw return nil } -// SetRegistry replaces the current registry of the encoder with r. +// SetRegistry replaces the current registry of the Encoder with r. func (e *Encoder) SetRegistry(r *bsoncodec.Registry) error { + // TODO:(GODRIVER-2719): Remove error return value. e.ec.Registry = r return nil } -// SetContext replaces the current EncodeContext of the encoder with er. +// SetContext replaces the current EncodeContext of the encoder with ec. +// +// Deprecated: Use the Encoder configuration methods set the desired marshal behavior instead. func (e *Encoder) SetContext(ec bsoncodec.EncodeContext) error { + // TODO:(GODRIVER-2719): Remove error return value. e.ec = ec return nil } + +// ErrorOnInlineDuplicates causes the Encoder to return an error if there is a duplicate field in +// the marshaled BSON when the "inline" struct tag option is set. +func (e *Encoder) ErrorOnInlineDuplicates() { + e.errorOnInlineDuplicates = true +} + +// IntMinSize causes the Encoder to marshal Go integer values (int, int8, int16, int32, int64, uint, +// uint8, uint16, uint32, or uint64) as the minimum BSON int size (either 32 or 64 bits) that can +// represent the integer value. +func (e *Encoder) IntMinSize() { + e.intMinSize = true +} + +// StringifyMapKeysWithFmt causes the Encoder to convert Go map keys to BSON document field name +// strings using fmt.Sprint instead of the default string conversion logic. +func (e *Encoder) StringifyMapKeysWithFmt() { + e.stringifyMapKeysWithFmt = true +} + +// NilMapAsEmpty causes the Encoder to marshal nil Go maps as empty BSON documents instead of BSON +// null. +func (e *Encoder) NilMapAsEmpty() { + e.nilMapAsEmpty = true +} + +// NilSliceAsEmpty causes the Encoder to marshal nil Go slices as empty BSON arrays instead of BSON +// null. +func (e *Encoder) NilSliceAsEmpty() { + e.nilSliceAsEmpty = true +} + +// NilByteSliceAsEmpty causes the Encoder to marshal nil Go byte slices as empty BSON binary values +// instead of BSON null. +func (e *Encoder) NilByteSliceAsEmpty() { + e.nilByteSliceAsEmpty = true +} + +// TODO(GODRIVER-2820): Update the description to remove the note about only examining exported +// TODO struct fields once the logic is updated to also inspect private struct fields. + +// OmitZeroStruct causes the Encoder to consider the zero value for a struct (e.g. MyStruct{}) +// as empty and omit it from the marshaled BSON when the "omitempty" struct tag option is set. +// +// Note that the Encoder only examines exported struct fields when determining if a struct is the +// zero value. It considers pointers to a zero struct value (e.g. &MyStruct{}) not empty. 
+func (e *Encoder) OmitZeroStruct() { + e.omitZeroStruct = true +} + +// UseJSONStructTags causes the Encoder to fall back to using the "json" struct tag if a "bson" +// struct tag is not specified. +func (e *Encoder) UseJSONStructTags() { + e.useJSONStructTags = true +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/marshal.go b/vendor/go.mongodb.org/mongo-driver/bson/marshal.go index db8d8ee92b..f2c48d049e 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/marshal.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/marshal.go @@ -20,17 +20,23 @@ const defaultDstCap = 256 var bvwPool = bsonrw.NewBSONValueWriterPool() var extjPool = bsonrw.NewExtJSONValueWriterPool() -// Marshaler is an interface implemented by types that can marshal themselves -// into a BSON document represented as bytes. The bytes returned must be a valid -// BSON document if the error is nil. +// Marshaler is the interface implemented by types that can marshal themselves +// into a valid BSON document. +// +// Implementations of Marshaler must return a full BSON document. To create +// custom BSON marshaling behavior for individual values in a BSON document, +// implement the ValueMarshaler interface instead. type Marshaler interface { MarshalBSON() ([]byte, error) } -// ValueMarshaler is an interface implemented by types that can marshal -// themselves into a BSON value as bytes. The type must be the valid type for -// the bytes returned. The bytes and byte type together must be valid if the -// error is nil. +// ValueMarshaler is the interface implemented by types that can marshal +// themselves into a valid BSON value. The format of the returned bytes must +// match the returned type. +// +// Implementations of ValueMarshaler must return an individual BSON value. To +// create custom BSON marshaling behavior for an entire BSON document, implement +// the Marshaler interface instead. type ValueMarshaler interface { MarshalBSONValue() (bsontype.Type, []byte, error) } @@ -48,12 +54,42 @@ func Marshal(val interface{}) ([]byte, error) { // MarshalAppend will encode val as a BSON document and append the bytes to dst. If dst is not large enough to hold the // bytes, it will be grown. If val is not a type that can be transformed into a document, MarshalValueAppend should be // used instead. +// +// Deprecated: Use [NewEncoder] and pass the dst byte slice (wrapped by a bytes.Buffer) into +// [bsonrw.NewBSONValueWriter]: +// +// buf := bytes.NewBuffer(dst) +// vw, err := bsonrw.NewBSONValueWriter(buf) +// if err != nil { +// panic(err) +// } +// enc, err := bson.NewEncoder(vw) +// if err != nil { +// panic(err) +// } +// +// See [Encoder] for more examples. func MarshalAppend(dst []byte, val interface{}) ([]byte, error) { return MarshalAppendWithRegistry(DefaultRegistry, dst, val) } // MarshalWithRegistry returns the BSON encoding of val as a BSON document. If val is not a type that can be transformed // into a document, MarshalValueWithRegistry should be used instead. +// +// Deprecated: Use [NewEncoder] and specify the Registry by calling [Encoder.SetRegistry] instead: +// +// buf := new(bytes.Buffer) +// vw, err := bsonrw.NewBSONValueWriter(buf) +// if err != nil { +// panic(err) +// } +// enc, err := bson.NewEncoder(vw) +// if err != nil { +// panic(err) +// } +// enc.SetRegistry(reg) +// +// See [Encoder] for more examples. 
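The marshal.go deprecations that follow all funnel toward one pattern: wrap a buffer in a bsonrw ValueWriter, build an Encoder on it, and set behavior with the new Encoder methods rather than an EncodeContext. A minimal sketch under those assumptions, with bson.Raw used only to print the result:

	package main

	import (
		"bytes"
		"fmt"

		"go.mongodb.org/mongo-driver/bson"
		"go.mongodb.org/mongo-driver/bson/bsonrw"
	)

	func main() {
		buf := new(bytes.Buffer)
		vw, err := bsonrw.NewBSONValueWriter(buf)
		if err != nil {
			panic(err)
		}
		enc, err := bson.NewEncoder(vw)
		if err != nil {
			panic(err)
		}

		// Options that previously required an EncodeContext are now methods.
		enc.IntMinSize()    // store int64 values that fit as BSON int32
		enc.NilMapAsEmpty() // encode nil maps as {} instead of null

		var tags map[string]string // nil map
		if err := enc.Encode(bson.M{"count": int64(7), "tags": tags}); err != nil {
			panic(err)
		}
		fmt.Println(bson.Raw(buf.Bytes()).String())
	}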
func MarshalWithRegistry(r *bsoncodec.Registry, val interface{}) ([]byte, error) { dst := make([]byte, 0) return MarshalAppendWithRegistry(r, dst, val) @@ -61,6 +97,22 @@ func MarshalWithRegistry(r *bsoncodec.Registry, val interface{}) ([]byte, error) // MarshalWithContext returns the BSON encoding of val as a BSON document using EncodeContext ec. If val is not a type // that can be transformed into a document, MarshalValueWithContext should be used instead. +// +// Deprecated: Use [NewEncoder] and use the Encoder configuration methods to set the desired marshal +// behavior instead: +// +// buf := bytes.NewBuffer(dst) +// vw, err := bsonrw.NewBSONValueWriter(buf) +// if err != nil { +// panic(err) +// } +// enc, err := bson.NewEncoder(vw) +// if err != nil { +// panic(err) +// } +// enc.IntMinSize() +// +// See [Encoder] for more examples. func MarshalWithContext(ec bsoncodec.EncodeContext, val interface{}) ([]byte, error) { dst := make([]byte, 0) return MarshalAppendWithContext(ec, dst, val) @@ -69,6 +121,22 @@ func MarshalWithContext(ec bsoncodec.EncodeContext, val interface{}) ([]byte, er // MarshalAppendWithRegistry will encode val as a BSON document using Registry r and append the bytes to dst. If dst is // not large enough to hold the bytes, it will be grown. If val is not a type that can be transformed into a document, // MarshalValueAppendWithRegistry should be used instead. +// +// Deprecated: Use [NewEncoder], and pass the dst byte slice (wrapped by a bytes.Buffer) into +// [bsonrw.NewBSONValueWriter], and specify the Registry by calling [Encoder.SetRegistry] instead: +// +// buf := bytes.NewBuffer(dst) +// vw, err := bsonrw.NewBSONValueWriter(buf) +// if err != nil { +// panic(err) +// } +// enc, err := bson.NewEncoder(vw) +// if err != nil { +// panic(err) +// } +// enc.SetRegistry(reg) +// +// See [Encoder] for more examples. func MarshalAppendWithRegistry(r *bsoncodec.Registry, dst []byte, val interface{}) ([]byte, error) { return MarshalAppendWithContext(bsoncodec.EncodeContext{Registry: r}, dst, val) } @@ -76,6 +144,23 @@ func MarshalAppendWithRegistry(r *bsoncodec.Registry, dst []byte, val interface{ // MarshalAppendWithContext will encode val as a BSON document using Registry r and EncodeContext ec and append the // bytes to dst. If dst is not large enough to hold the bytes, it will be grown. If val is not a type that can be // transformed into a document, MarshalValueAppendWithContext should be used instead. +// +// Deprecated: Use [NewEncoder], pass the dst byte slice (wrapped by a bytes.Buffer) into +// [bsonrw.NewBSONValueWriter], and use the Encoder configuration methods to set the desired marshal +// behavior instead: +// +// buf := bytes.NewBuffer(dst) +// vw, err := bsonrw.NewBSONValueWriter(buf) +// if err != nil { +// panic(err) +// } +// enc, err := bson.NewEncoder(vw) +// if err != nil { +// panic(err) +// } +// enc.IntMinSize() +// +// See [Encoder] for more examples. func MarshalAppendWithContext(ec bsoncodec.EncodeContext, dst []byte, val interface{}) ([]byte, error) { sw := new(bsonrw.SliceWriter) *sw = dst @@ -112,17 +197,26 @@ func MarshalValue(val interface{}) (bsontype.Type, []byte, error) { // MarshalValueAppend will append the BSON encoding of val to dst. If dst is not large enough to hold the BSON encoding // of val, dst will be grown. +// +// Deprecated: Appending individual BSON elements to an existing slice will not be supported in Go +// Driver 2.0. 
func MarshalValueAppend(dst []byte, val interface{}) (bsontype.Type, []byte, error) { return MarshalValueAppendWithRegistry(DefaultRegistry, dst, val) } // MarshalValueWithRegistry returns the BSON encoding of val using Registry r. +// +// Deprecated: Using a custom registry to marshal individual BSON values will not be supported in Go +// Driver 2.0. func MarshalValueWithRegistry(r *bsoncodec.Registry, val interface{}) (bsontype.Type, []byte, error) { dst := make([]byte, 0) return MarshalValueAppendWithRegistry(r, dst, val) } // MarshalValueWithContext returns the BSON encoding of val using EncodeContext ec. +// +// Deprecated: Using a custom EncodeContext to marshal individual BSON elements will not be +// supported in Go Driver 2.0. func MarshalValueWithContext(ec bsoncodec.EncodeContext, val interface{}) (bsontype.Type, []byte, error) { dst := make([]byte, 0) return MarshalValueAppendWithContext(ec, dst, val) @@ -130,12 +224,18 @@ func MarshalValueWithContext(ec bsoncodec.EncodeContext, val interface{}) (bsont // MarshalValueAppendWithRegistry will append the BSON encoding of val to dst using Registry r. If dst is not large // enough to hold the BSON encoding of val, dst will be grown. +// +// Deprecated: Appending individual BSON elements to an existing slice will not be supported in Go +// Driver 2.0. func MarshalValueAppendWithRegistry(r *bsoncodec.Registry, dst []byte, val interface{}) (bsontype.Type, []byte, error) { return MarshalValueAppendWithContext(bsoncodec.EncodeContext{Registry: r}, dst, val) } // MarshalValueAppendWithContext will append the BSON encoding of val to dst using EncodeContext ec. If dst is not large // enough to hold the BSON encoding of val, dst will be grown. +// +// Deprecated: Appending individual BSON elements to an existing slice will not be supported in Go +// Driver 2.0. func MarshalValueAppendWithContext(ec bsoncodec.EncodeContext, dst []byte, val interface{}) (bsontype.Type, []byte, error) { // get a ValueWriter configured to write to dst sw := new(bsonrw.SliceWriter) @@ -173,17 +273,63 @@ func MarshalExtJSON(val interface{}, canonical, escapeHTML bool) ([]byte, error) // MarshalExtJSONAppend will append the extended JSON encoding of val to dst. // If dst is not large enough to hold the extended JSON encoding of val, dst // will be grown. +// +// Deprecated: Use [NewEncoder] and pass the dst byte slice (wrapped by a bytes.Buffer) into +// [bsonrw.NewExtJSONValueWriter] instead: +// +// buf := bytes.NewBuffer(dst) +// vw, err := bsonrw.NewExtJSONValueWriter(buf, true, false) +// if err != nil { +// panic(err) +// } +// enc, err := bson.NewEncoder(vw) +// if err != nil { +// panic(err) +// } +// +// See [Encoder] for more examples. func MarshalExtJSONAppend(dst []byte, val interface{}, canonical, escapeHTML bool) ([]byte, error) { return MarshalExtJSONAppendWithRegistry(DefaultRegistry, dst, val, canonical, escapeHTML) } // MarshalExtJSONWithRegistry returns the extended JSON encoding of val using Registry r. +// +// Deprecated: Use [NewEncoder] and specify the Registry by calling [Encoder.SetRegistry] instead: +// +// buf := new(bytes.Buffer) +// vw, err := bsonrw.NewBSONValueWriter(buf) +// if err != nil { +// panic(err) +// } +// enc, err := bson.NewEncoder(vw) +// if err != nil { +// panic(err) +// } +// enc.SetRegistry(reg) +// +// See [Encoder] for more examples. 
func MarshalExtJSONWithRegistry(r *bsoncodec.Registry, val interface{}, canonical, escapeHTML bool) ([]byte, error) { dst := make([]byte, 0, defaultDstCap) return MarshalExtJSONAppendWithContext(bsoncodec.EncodeContext{Registry: r}, dst, val, canonical, escapeHTML) } // MarshalExtJSONWithContext returns the extended JSON encoding of val using Registry r. +// +// Deprecated: Use [NewEncoder] and use the Encoder configuration methods to set the desired marshal +// behavior instead: +// +// buf := new(bytes.Buffer) +// vw, err := bsonrw.NewBSONValueWriter(buf) +// if err != nil { +// panic(err) +// } +// enc, err := bson.NewEncoder(vw) +// if err != nil { +// panic(err) +// } +// enc.IntMinSize() +// +// See [Encoder] for more examples. func MarshalExtJSONWithContext(ec bsoncodec.EncodeContext, val interface{}, canonical, escapeHTML bool) ([]byte, error) { dst := make([]byte, 0, defaultDstCap) return MarshalExtJSONAppendWithContext(ec, dst, val, canonical, escapeHTML) @@ -192,6 +338,22 @@ func MarshalExtJSONWithContext(ec bsoncodec.EncodeContext, val interface{}, cano // MarshalExtJSONAppendWithRegistry will append the extended JSON encoding of // val to dst using Registry r. If dst is not large enough to hold the BSON // encoding of val, dst will be grown. +// +// Deprecated: Use [NewEncoder], pass the dst byte slice (wrapped by a bytes.Buffer) into +// [bsonrw.NewExtJSONValueWriter], and specify the Registry by calling [Encoder.SetRegistry] +// instead: +// +// buf := bytes.NewBuffer(dst) +// vw, err := bsonrw.NewExtJSONValueWriter(buf, true, false) +// if err != nil { +// panic(err) +// } +// enc, err := bson.NewEncoder(vw) +// if err != nil { +// panic(err) +// } +// +// See [Encoder] for more examples. func MarshalExtJSONAppendWithRegistry(r *bsoncodec.Registry, dst []byte, val interface{}, canonical, escapeHTML bool) ([]byte, error) { return MarshalExtJSONAppendWithContext(bsoncodec.EncodeContext{Registry: r}, dst, val, canonical, escapeHTML) } @@ -199,6 +361,23 @@ func MarshalExtJSONAppendWithRegistry(r *bsoncodec.Registry, dst []byte, val int // MarshalExtJSONAppendWithContext will append the extended JSON encoding of // val to dst using Registry r. If dst is not large enough to hold the BSON // encoding of val, dst will be grown. +// +// Deprecated: Use [NewEncoder], pass the dst byte slice (wrapped by a bytes.Buffer) into +// [bsonrw.NewExtJSONValueWriter], and use the Encoder configuration methods to set the desired marshal +// behavior instead: +// +// buf := bytes.NewBuffer(dst) +// vw, err := bsonrw.NewExtJSONValueWriter(buf, true, false) +// if err != nil { +// panic(err) +// } +// enc, err := bson.NewEncoder(vw) +// if err != nil { +// panic(err) +// } +// enc.IntMinSize() +// +// See [Encoder] for more examples. 
func MarshalExtJSONAppendWithContext(ec bsoncodec.EncodeContext, dst []byte, val interface{}, canonical, escapeHTML bool) ([]byte, error) { sw := new(bsonrw.SliceWriter) *sw = dst diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go index ded3673165..9bbaffac26 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go @@ -82,18 +82,18 @@ func ObjectIDFromHex(s string) (ObjectID, error) { return NilObjectID, ErrInvalidHex } - b, err := hex.DecodeString(s) + var oid [12]byte + _, err := hex.Decode(oid[:], []byte(s)) if err != nil { return NilObjectID, err } - var oid [12]byte - copy(oid[:], b) - return oid, nil } // IsValidObjectID returns true if the provided hex string represents a valid ObjectID and false if not. +// +// Deprecated: Use ObjectIDFromHex and check the error instead. func IsValidObjectID(s string) bool { _, err := ObjectIDFromHex(s) return err == nil diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go index c72ccc1c4d..65f4fbb949 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go @@ -45,7 +45,7 @@ var _ json.Unmarshaler = (*DateTime)(nil) // MarshalJSON marshal to time type. func (d DateTime) MarshalJSON() ([]byte, error) { - return json.Marshal(d.Time()) + return json.Marshal(d.Time().UTC()) } // UnmarshalJSON creates a primitive.DateTime from a JSON string. @@ -141,6 +141,16 @@ type Timestamp struct { I uint32 } +// After reports whether the time instant tp is after tp2. +func (tp Timestamp) After(tp2 Timestamp) bool { + return tp.T > tp2.T || (tp.T == tp2.T && tp.I > tp2.I) +} + +// Before reports whether the time instant tp is before tp2. +func (tp Timestamp) Before(tp2 Timestamp) bool { + return tp.T < tp2.T || (tp.T == tp2.T && tp.I < tp2.I) +} + // Equal compares tp to tp2 and returns true if they are equal. func (tp Timestamp) Equal(tp2 Timestamp) bool { return tp.T == tp2.T && tp.I == tp2.I @@ -151,24 +161,25 @@ func (tp Timestamp) IsZero() bool { return tp.T == 0 && tp.I == 0 } -// CompareTimestamp returns an integer comparing two Timestamps, where T is compared first, followed by I. -// Returns 0 if tp = tp2, 1 if tp > tp2, -1 if tp < tp2. -func CompareTimestamp(tp, tp2 Timestamp) int { - if tp.Equal(tp2) { +// Compare compares the time instant tp with tp2. If tp is before tp2, it returns -1; if tp is after +// tp2, it returns +1; if they're the same, it returns 0. +func (tp Timestamp) Compare(tp2 Timestamp) int { + switch { + case tp.Equal(tp2): return 0 - } - - if tp.T > tp2.T { - return 1 - } - if tp.T < tp2.T { + case tp.Before(tp2): return -1 + default: + return +1 } - // Compare I values because T values are equal - if tp.I > tp2.I { - return 1 - } - return -1 +} + +// CompareTimestamp compares the time instant tp with tp2. If tp is before tp2, it returns -1; if tp is after +// tp2, it returns +1; if they're the same, it returns 0. +// +// Deprecated: Use Timestamp.Compare instead. +func CompareTimestamp(tp, tp2 Timestamp) int { + return tp.Compare(tp2) } // MinKey represents the BSON minkey value. @@ -186,6 +197,9 @@ type MaxKey struct{} type D []E // Map creates a map from the elements of the D. +// +// Deprecated: Converting directly from a D to an M will not be supported in Go Driver 2.0. 
Instead, +// users should marshal the D to BSON using bson.Marshal and unmarshal it to M using bson.Unmarshal. func (d D) Map() M { m := make(M, len(d)) for _, e := range d { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive_codecs.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive_codecs.go index 1cbe3884d1..6b9602589c 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/primitive_codecs.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive_codecs.go @@ -21,10 +21,16 @@ var primitiveCodecs PrimitiveCodecs // PrimitiveCodecs is a namespace for all of the default bsoncodec.Codecs for the primitive types // defined in this package. +// +// Deprecated: Use bson.NewRegistry to get a registry with all primitive encoders and decoders +// registered. type PrimitiveCodecs struct{} // RegisterPrimitiveCodecs will register the encode and decode methods attached to PrimitiveCodecs // with the provided RegistryBuilder. if rb is nil, a new empty RegistryBuilder will be created. +// +// Deprecated: Use bson.NewRegistry to get a registry with all primitive encoders and decoders +// registered. func (pc PrimitiveCodecs) RegisterPrimitiveCodecs(rb *bsoncodec.RegistryBuilder) { if rb == nil { panic(errors.New("argument to RegisterPrimitiveCodecs must not be nil")) @@ -38,7 +44,10 @@ func (pc PrimitiveCodecs) RegisterPrimitiveCodecs(rb *bsoncodec.RegistryBuilder) } // RawValueEncodeValue is the ValueEncoderFunc for RawValue. -func (PrimitiveCodecs) RawValueEncodeValue(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use bson.NewRegistry to get a registry with all primitive encoders and decoders +// registered. +func (PrimitiveCodecs) RawValueEncodeValue(_ bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tRawValue { return bsoncodec.ValueEncoderError{Name: "RawValueEncodeValue", Types: []reflect.Type{tRawValue}, Received: val} } @@ -49,7 +58,10 @@ func (PrimitiveCodecs) RawValueEncodeValue(ec bsoncodec.EncodeContext, vw bsonrw } // RawValueDecodeValue is the ValueDecoderFunc for RawValue. -func (PrimitiveCodecs) RawValueDecodeValue(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { +// +// Deprecated: Use bson.NewRegistry to get a registry with all primitive encoders and decoders +// registered. +func (PrimitiveCodecs) RawValueDecodeValue(_ bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tRawValue { return bsoncodec.ValueDecoderError{Name: "RawValueDecodeValue", Types: []reflect.Type{tRawValue}, Received: val} } @@ -64,7 +76,10 @@ func (PrimitiveCodecs) RawValueDecodeValue(dc bsoncodec.DecodeContext, vr bsonrw } // RawEncodeValue is the ValueEncoderFunc for Reader. -func (PrimitiveCodecs) RawEncodeValue(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use bson.NewRegistry to get a registry with all primitive encoders and decoders +// registered. +func (PrimitiveCodecs) RawEncodeValue(_ bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tRaw { return bsoncodec.ValueEncoderError{Name: "RawEncodeValue", Types: []reflect.Type{tRaw}, Received: val} } @@ -75,7 +90,10 @@ func (PrimitiveCodecs) RawEncodeValue(ec bsoncodec.EncodeContext, vw bsonrw.Valu } // RawDecodeValue is the ValueDecoderFunc for Reader. 
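Per the primitive.go hunk above, D.Map is deprecated and the suggested route from a D to an M is a Marshal/Unmarshal round trip. A short sketch of that conversion (field names and values are arbitrary):

	package main

	import (
		"fmt"

		"go.mongodb.org/mongo-driver/bson"
	)

	func main() {
		d := bson.D{{Key: "name", Value: "example"}, {Key: "replicas", Value: int32(2)}}

		// Round-trip through BSON instead of calling the deprecated d.Map().
		raw, err := bson.Marshal(d)
		if err != nil {
			panic(err)
		}
		var m bson.M
		if err := bson.Unmarshal(raw, &m); err != nil {
			panic(err)
		}
		fmt.Println(m["name"], m["replicas"]) // example 2
	}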
-func (PrimitiveCodecs) RawDecodeValue(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { +// +// Deprecated: Use bson.NewRegistry to get a registry with all primitive encoders and decoders +// registered. +func (PrimitiveCodecs) RawDecodeValue(_ bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tRaw { return bsoncodec.ValueDecoderError{Name: "RawDecodeValue", Types: []reflect.Type{tRaw}, Received: val} } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/raw.go b/vendor/go.mongodb.org/mongo-driver/bson/raw.go index efd705daaa..fe990a1771 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/raw.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/raw.go @@ -16,18 +16,27 @@ import ( // ErrNilReader indicates that an operation was attempted on a nil bson.Reader. var ErrNilReader = errors.New("nil reader") -// Raw is a wrapper around a byte slice. It will interpret the slice as a -// BSON document. This type is a wrapper around a bsoncore.Document. Errors returned from the -// methods on this type and associated types come from the bsoncore package. +// Raw is a raw encoded BSON document. It can be used to delay BSON document decoding or precompute +// a BSON encoded document. +// +// A Raw must be a full BSON document. Use the RawValue type for individual BSON values. type Raw []byte -// NewFromIOReader reads in a document from the given io.Reader and constructs a Raw from -// it. -func NewFromIOReader(r io.Reader) (Raw, error) { +// ReadDocument reads a BSON document from the io.Reader and returns it as a bson.Raw. If the +// reader contains multiple BSON documents, only the first document is read. +func ReadDocument(r io.Reader) (Raw, error) { doc, err := bsoncore.NewDocumentFromReader(r) return Raw(doc), err } +// NewFromIOReader reads a BSON document from the io.Reader and returns it as a bson.Raw. If the +// reader contains multiple BSON documents, only the first document is read. +// +// Deprecated: Use ReadDocument instead. +func NewFromIOReader(r io.Reader) (Raw, error) { + return ReadDocument(r) +} + // Validate validates the document. This method only validates the first document in // the slice, to validate other documents, the slice must be resliced. func (r Raw) Validate() (err error) { return bsoncore.Document(r).Validate() } @@ -81,5 +90,5 @@ func (r Raw) IndexErr(index uint) (RawElement, error) { return RawElement(elem), err } -// String implements the fmt.Stringer interface. +// String returns the BSON document encoded as Extended JSON. func (r Raw) String() string { return bsoncore.Document(r).String() } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/raw_element.go b/vendor/go.mongodb.org/mongo-driver/bson/raw_element.go index 006f503a30..8ce13c2cc7 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/raw_element.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/raw_element.go @@ -10,10 +10,7 @@ import ( "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" ) -// RawElement represents a BSON element in byte form. This type provides a simple way to -// transform a slice of bytes into a BSON element and extract information from it. -// -// RawElement is a thin wrapper around a bsoncore.Element. +// RawElement is a raw encoded BSON document or array element. type RawElement []byte // Key returns the key for this element. If the element is not valid, this method returns an empty @@ -36,7 +33,7 @@ func (re RawElement) ValueErr() (RawValue, error) { // Validate ensures re is a valid BSON element. 
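raw.go above renames NewFromIOReader to ReadDocument and documents Raw.String as Extended JSON output. A small sketch, where a document marshaled in-process stands in for bytes read off a stream:

	package main

	import (
		"bytes"
		"fmt"

		"go.mongodb.org/mongo-driver/bson"
	)

	func main() {
		data, err := bson.Marshal(bson.M{"cluster": "demo", "nodes": int32(3)})
		if err != nil {
			panic(err)
		}

		// ReadDocument reads exactly one BSON document from the reader and
		// returns it as a bson.Raw.
		raw, err := bson.ReadDocument(bytes.NewReader(data))
		if err != nil {
			panic(err)
		}
		fmt.Println(raw.Lookup("cluster").StringValue()) // demo
		fmt.Println(raw.String())                        // Extended JSON form of the document
	}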
func (re RawElement) Validate() error { return bsoncore.Element(re).Validate() } -// String implements the fmt.Stringer interface. The output will be in extended JSON format. +// String returns the BSON element encoded as Extended JSON. func (re RawElement) String() string { doc := bsoncore.BuildDocument(nil, re) j, err := MarshalExtJSON(Raw(doc), true, false) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/raw_value.go b/vendor/go.mongodb.org/mongo-driver/bson/raw_value.go index 75297f30fe..6627294c4d 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/raw_value.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/raw_value.go @@ -26,11 +26,10 @@ var ErrNilContext = errors.New("DecodeContext cannot be nil") // ErrNilRegistry is returned when the provided registry is nil. var ErrNilRegistry = errors.New("Registry cannot be nil") -// RawValue represents a BSON value in byte form. It can be used to hold unprocessed BSON or to -// defer processing of BSON. Type is the BSON type of the value and Value are the raw bytes that -// represent the element. +// RawValue is a raw encoded BSON value. It can be used to delay BSON value decoding or precompute +// BSON encoded value. Type is the BSON type of the value and Value is the raw encoded BSON value. // -// This type wraps bsoncore.Value for most of it's functionality. +// A RawValue must be an individual BSON value. Use the Raw type for full BSON documents. type RawValue struct { Type bsontype.Type Value []byte @@ -268,10 +267,16 @@ func (rv RawValue) Int32OK() (int32, bool) { return convertToCoreValue(rv).Int32 // AsInt32 returns a BSON number as an int32. If the BSON type is not a numeric one, this method // will panic. +// +// Deprecated: Use AsInt64 instead. If an int32 is required, convert the returned value to an int32 +// and perform any required overflow/underflow checking. func (rv RawValue) AsInt32() int32 { return convertToCoreValue(rv).AsInt32() } // AsInt32OK is the same as AsInt32, except that it returns a boolean instead of // panicking. +// +// Deprecated: Use AsInt64OK instead. If an int32 is required, convert the returned value to an +// int32 and perform any required overflow/underflow checking. func (rv RawValue) AsInt32OK() (int32, bool) { return convertToCoreValue(rv).AsInt32OK() } // Timestamp returns the BSON timestamp value the Value represents. It panics if the value is a diff --git a/vendor/go.mongodb.org/mongo-driver/bson/registry.go b/vendor/go.mongodb.org/mongo-driver/bson/registry.go index 16d7573e75..b5b0f35687 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/registry.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/registry.go @@ -6,15 +6,19 @@ package bson -import "go.mongodb.org/mongo-driver/bson/bsoncodec" +import ( + "go.mongodb.org/mongo-driver/bson/bsoncodec" +) // DefaultRegistry is the default bsoncodec.Registry. It contains the default codecs and the // primitive codecs. -var DefaultRegistry = NewRegistryBuilder().Build() +var DefaultRegistry = NewRegistry() // NewRegistryBuilder creates a new RegistryBuilder configured with the default encoders and // decoders from the bsoncodec.DefaultValueEncoders and bsoncodec.DefaultValueDecoders types and the // PrimitiveCodecs type in this package. +// +// Deprecated: Use NewRegistry instead. 
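The registry.go hunk here (continued just below) deprecates NewRegistryBuilder in favor of the new bson.NewRegistry, which the marshal/unmarshal deprecations elsewhere in this diff pair with Encoder.SetRegistry and Decoder.SetRegistry. A hedged sketch of that wiring:

	package main

	import (
		"bytes"
		"fmt"

		"go.mongodb.org/mongo-driver/bson"
		"go.mongodb.org/mongo-driver/bson/bsonrw"
	)

	func main() {
		// NewRegistry replaces the NewRegistryBuilder().Build() pattern and already
		// includes the default and primitive codecs.
		reg := bson.NewRegistry()

		buf := new(bytes.Buffer)
		vw, err := bsonrw.NewBSONValueWriter(buf)
		if err != nil {
			panic(err)
		}
		enc, err := bson.NewEncoder(vw)
		if err != nil {
			panic(err)
		}
		if err := enc.SetRegistry(reg); err != nil {
			panic(err)
		}

		if err := enc.Encode(bson.M{"ok": true}); err != nil {
			panic(err)
		}
		fmt.Println(bson.Raw(buf.Bytes()).String())
	}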
func NewRegistryBuilder() *bsoncodec.RegistryBuilder { rb := bsoncodec.NewRegistryBuilder() bsoncodec.DefaultValueEncoders{}.RegisterDefaultEncoders(rb) @@ -22,3 +26,10 @@ func NewRegistryBuilder() *bsoncodec.RegistryBuilder { primitiveCodecs.RegisterPrimitiveCodecs(rb) return rb } + +// NewRegistry creates a new Registry configured with the default encoders and decoders from the +// bsoncodec.DefaultValueEncoders and bsoncodec.DefaultValueDecoders types and the PrimitiveCodecs +// type in this package. +func NewRegistry() *bsoncodec.Registry { + return NewRegistryBuilder().Build() +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/types.go b/vendor/go.mongodb.org/mongo-driver/bson/types.go index 13a1c35cf6..e201ac37eb 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/types.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/types.go @@ -10,7 +10,7 @@ import ( "go.mongodb.org/mongo-driver/bson/bsontype" ) -// These constants uniquely refer to each BSON type. +// BSON element types as described in https://bsonspec.org/spec.html. const ( TypeDouble = bsontype.Double TypeString = bsontype.String @@ -34,3 +34,16 @@ const ( TypeMinKey = bsontype.MinKey TypeMaxKey = bsontype.MaxKey ) + +// BSON binary element subtypes as described in https://bsonspec.org/spec.html. +const ( + TypeBinaryGeneric = bsontype.BinaryGeneric + TypeBinaryFunction = bsontype.BinaryFunction + TypeBinaryBinaryOld = bsontype.BinaryBinaryOld + TypeBinaryUUIDOld = bsontype.BinaryUUIDOld + TypeBinaryUUID = bsontype.BinaryUUID + TypeBinaryMD5 = bsontype.BinaryMD5 + TypeBinaryEncrypted = bsontype.BinaryEncrypted + TypeBinaryColumn = bsontype.BinaryColumn + TypeBinaryUserDefined = bsontype.BinaryUserDefined +) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go b/vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go index f936ba1836..66da17ee01 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go @@ -14,18 +14,26 @@ import ( "go.mongodb.org/mongo-driver/bson/bsontype" ) -// Unmarshaler is an interface implemented by types that can unmarshal a BSON -// document representation of themselves. The BSON bytes can be assumed to be -// valid. UnmarshalBSON must copy the BSON bytes if it wishes to retain the data -// after returning. +// Unmarshaler is the interface implemented by types that can unmarshal a BSON +// document representation of themselves. The input can be assumed to be a valid +// encoding of a BSON document. UnmarshalBSON must copy the JSON data if it +// wishes to retain the data after returning. +// +// Unmarshaler is only used to unmarshal full BSON documents. To create custom +// BSON unmarshaling behavior for individual values in a BSON document, +// implement the ValueUnmarshaler interface instead. type Unmarshaler interface { UnmarshalBSON([]byte) error } -// ValueUnmarshaler is an interface implemented by types that can unmarshal a -// BSON value representation of themselves. The BSON bytes and type can be -// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it -// wishes to retain the data after returning. +// ValueUnmarshaler is the interface implemented by types that can unmarshal a +// BSON value representation of themselves. The input can be assumed to be a +// valid encoding of a BSON value. UnmarshalBSONValue must copy the BSON value +// bytes if it wishes to retain the data after returning. +// +// ValueUnmarshaler is only used to unmarshal individual values in a BSON +// document. 
To create custom BSON unmarshaling behavior for an entire BSON +// document, implement the Unmarshaler interface instead. type ValueUnmarshaler interface { UnmarshalBSONValue(bsontype.Type, []byte) error } @@ -40,6 +48,16 @@ func Unmarshal(data []byte, val interface{}) error { // UnmarshalWithRegistry parses the BSON-encoded data using Registry r and // stores the result in the value pointed to by val. If val is nil or not // a pointer, UnmarshalWithRegistry returns InvalidUnmarshalError. +// +// Deprecated: Use [NewDecoder] and specify the Registry by calling [Decoder.SetRegistry] instead: +// +// dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(data)) +// if err != nil { +// panic(err) +// } +// dec.SetRegistry(reg) +// +// See [Decoder] for more examples. func UnmarshalWithRegistry(r *bsoncodec.Registry, data []byte, val interface{}) error { vr := bsonrw.NewBSONDocumentReader(data) return unmarshalFromReader(bsoncodec.DecodeContext{Registry: r}, vr, val) @@ -48,11 +66,40 @@ func UnmarshalWithRegistry(r *bsoncodec.Registry, data []byte, val interface{}) // UnmarshalWithContext parses the BSON-encoded data using DecodeContext dc and // stores the result in the value pointed to by val. If val is nil or not // a pointer, UnmarshalWithRegistry returns InvalidUnmarshalError. +// +// Deprecated: Use [NewDecoder] and use the Decoder configuration methods to set the desired unmarshal +// behavior instead: +// +// dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(data)) +// if err != nil { +// panic(err) +// } +// dec.DefaultDocumentM() +// +// See [Decoder] for more examples. func UnmarshalWithContext(dc bsoncodec.DecodeContext, data []byte, val interface{}) error { vr := bsonrw.NewBSONDocumentReader(data) return unmarshalFromReader(dc, vr, val) } +// UnmarshalValue parses the BSON value of type t with bson.DefaultRegistry and +// stores the result in the value pointed to by val. If val is nil or not a pointer, +// UnmarshalValue returns an error. +func UnmarshalValue(t bsontype.Type, data []byte, val interface{}) error { + return UnmarshalValueWithRegistry(DefaultRegistry, t, data, val) +} + +// UnmarshalValueWithRegistry parses the BSON value of type t with registry r and +// stores the result in the value pointed to by val. If val is nil or not a pointer, +// UnmarshalValue returns an error. +// +// Deprecated: Using a custom registry to unmarshal individual BSON values will not be supported in +// Go Driver 2.0. +func UnmarshalValueWithRegistry(r *bsoncodec.Registry, t bsontype.Type, data []byte, val interface{}) error { + vr := bsonrw.NewBSONValueReader(t, data) + return unmarshalFromReader(bsoncodec.DecodeContext{Registry: r}, vr, val) +} + // UnmarshalExtJSON parses the extended JSON-encoded data and stores the result // in the value pointed to by val. If val is nil or not a pointer, Unmarshal // returns InvalidUnmarshalError. @@ -63,6 +110,20 @@ func UnmarshalExtJSON(data []byte, canonical bool, val interface{}) error { // UnmarshalExtJSONWithRegistry parses the extended JSON-encoded data using // Registry r and stores the result in the value pointed to by val. If val is // nil or not a pointer, UnmarshalWithRegistry returns InvalidUnmarshalError. 
+// +// Deprecated: Use [NewDecoder] and specify the Registry by calling [Decoder.SetRegistry] instead: +// +// vr, err := bsonrw.NewExtJSONValueReader(bytes.NewReader(data), true) +// if err != nil { +// panic(err) +// } +// dec, err := bson.NewDecoder(vr) +// if err != nil { +// panic(err) +// } +// dec.SetRegistry(reg) +// +// See [Decoder] for more examples. func UnmarshalExtJSONWithRegistry(r *bsoncodec.Registry, data []byte, canonical bool, val interface{}) error { ejvr, err := bsonrw.NewExtJSONValueReader(bytes.NewReader(data), canonical) if err != nil { @@ -75,6 +136,21 @@ func UnmarshalExtJSONWithRegistry(r *bsoncodec.Registry, data []byte, canonical // UnmarshalExtJSONWithContext parses the extended JSON-encoded data using // DecodeContext dc and stores the result in the value pointed to by val. If val is // nil or not a pointer, UnmarshalWithRegistry returns InvalidUnmarshalError. +// +// Deprecated: Use [NewDecoder] and use the Decoder configuration methods to set the desired unmarshal +// behavior instead: +// +// vr, err := bsonrw.NewExtJSONValueReader(bytes.NewReader(data), true) +// if err != nil { +// panic(err) +// } +// dec, err := bson.NewDecoder(vr) +// if err != nil { +// panic(err) +// } +// dec.DefaultDocumentM() +// +// See [Decoder] for more examples. func UnmarshalExtJSONWithContext(dc bsoncodec.DecodeContext, data []byte, canonical bool, val interface{}) error { ejvr, err := bsonrw.NewExtJSONValueReader(bytes.NewReader(data), canonical) if err != nil { diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/array.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/array.go index 8ea60ba3c6..6bc0afa700 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/array.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/array.go @@ -7,10 +7,10 @@ package bsoncore import ( - "bytes" "fmt" "io" "strconv" + "strings" ) // NewArrayLengthError creates and returns an error for when the length of an array exceeds the @@ -53,7 +53,7 @@ func (a Array) DebugString() string { if len(a) < 5 { return "" } - var buf bytes.Buffer + var buf strings.Builder buf.WriteString("Array") length, rem, _ := ReadLength(a) // We know we have enough bytes to read the length buf.WriteByte('(') @@ -69,7 +69,7 @@ func (a Array) DebugString() string { buf.WriteString(fmt.Sprintf("", length)) break } - fmt.Fprintf(&buf, "%s", elem.Value().DebugString()) + buf.WriteString(elem.Value().DebugString()) if length != 1 { buf.WriteByte(',') } @@ -85,7 +85,7 @@ func (a Array) String() string { if len(a) < 5 { return "" } - var buf bytes.Buffer + var buf strings.Builder buf.WriteByte('[') length, rem, _ := ReadLength(a) // We know we have enough bytes to read the length @@ -100,7 +100,7 @@ func (a Array) String() string { if !ok { return "" } - fmt.Fprintf(&buf, "%s", elem.Value().String()) + buf.WriteString(elem.Value().String()) if length > 1 { buf.WriteByte(',') } diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go index 17aad6d71e..94d479428f 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go @@ -4,25 +4,6 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// Package bsoncore contains functions that can be used to encode and decode BSON -// elements and values to or from a slice of bytes. 
These functions are aimed at -// allowing low level manipulation of BSON and can be used to build a higher -// level BSON library. -// -// The Read* functions within this package return the values of the element and -// a boolean indicating if the values are valid. A boolean was used instead of -// an error because any error that would be returned would be the same: not -// enough bytes. This library attempts to do no validation, it will only return -// false if there are not enough bytes for an item to be read. For example, the -// ReadDocument function checks the length, if that length is larger than the -// number of bytes available, it will return false, if there are enough bytes, it -// will return those bytes and true. It is the consumers responsibility to -// validate those bytes. -// -// The Append* functions within this package will append the type value to the -// given dst slice. If the slice has enough capacity, it will not grow the -// slice. The Append*Element functions within this package operate in the same -// way, but additionally append the BSON type and the key before the value. package bsoncore // import "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" import ( diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/doc.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/doc.go new file mode 100644 index 0000000000..6837b53fc5 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/doc.go @@ -0,0 +1,29 @@ +// Copyright (C) MongoDB, Inc. 2022-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +// Package bsoncore contains functions that can be used to encode and decode BSON +// elements and values to or from a slice of bytes. These functions are aimed at +// allowing low level manipulation of BSON and can be used to build a higher +// level BSON library. +// +// The Read* functions within this package return the values of the element and +// a boolean indicating if the values are valid. A boolean was used instead of +// an error because any error that would be returned would be the same: not +// enough bytes. This library attempts to do no validation, it will only return +// false if there are not enough bytes for an item to be read. For example, the +// ReadDocument function checks the length, if that length is larger than the +// number of bytes available, it will return false, if there are enough bytes, it +// will return those bytes and true. It is the consumers responsibility to +// validate those bytes. +// +// The Append* functions within this package will append the type value to the +// given dst slice. If the slice has enough capacity, it will not grow the +// slice. The Append*Element functions within this package operate in the same +// way, but additionally append the BSON type and the key before the value. +// +// Warning: Package bsoncore is unstable and there is no backward compatibility +// guarantee. It is experimental and subject to change. 
+package bsoncore diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document.go index d6e4bb069f..3f360f1ae1 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document.go @@ -7,11 +7,11 @@ package bsoncore import ( - "bytes" "errors" "fmt" "io" "strconv" + "strings" "go.mongodb.org/mongo-driver/bson/bsontype" ) @@ -237,7 +237,7 @@ func (d Document) DebugString() string { if len(d) < 5 { return "" } - var buf bytes.Buffer + var buf strings.Builder buf.WriteString("Document") length, rem, _ := ReadLength(d) // We know we have enough bytes to read the length buf.WriteByte('(') @@ -253,7 +253,7 @@ func (d Document) DebugString() string { buf.WriteString(fmt.Sprintf("", length)) break } - fmt.Fprintf(&buf, "%s ", elem.DebugString()) + buf.WriteString(elem.DebugString()) } buf.WriteByte('}') @@ -266,7 +266,7 @@ func (d Document) String() string { if len(d) < 5 { return "" } - var buf bytes.Buffer + var buf strings.Builder buf.WriteByte('{') length, rem, _ := ReadLength(d) // We know we have enough bytes to read the length @@ -285,7 +285,7 @@ func (d Document) String() string { if !ok { return "" } - fmt.Fprintf(&buf, "%s", elem.String()) + buf.WriteString(elem.String()) first = false } buf.WriteByte('}') diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/element.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/element.go index 3acb4222b2..1fe0897c91 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/element.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/element.go @@ -129,7 +129,7 @@ func (e Element) String() string { if !valid { return "" } - return fmt.Sprintf(`"%s": %v`, key, val) + return "\"" + string(key) + "\": " + val.String() } // DebugString outputs a human readable version of RawElement. It will attempt to stringify the diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/value.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/value.go index 9cf87d6d77..69c1f9edbb 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/value.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/value.go @@ -190,21 +190,14 @@ func (v Value) AsInt64OK() (int64, bool) { // AsFloat64 returns a BSON number as an float64. If the BSON type is not a numeric one, this method // will panic. // -// TODO(skriptble): Add support for Decimal128. -func (v Value) AsFloat64() float64 { return 0 } +// TODO(GODRIVER-2751): Implement AsFloat64. +// func (v Value) AsFloat64() float64 // AsFloat64OK functions the same as AsFloat64 but returns a boolean instead of panicking. False // indicates an error. // -// TODO(skriptble): Add support for Decimal128. -func (v Value) AsFloat64OK() (float64, bool) { return 0, false } - -// Add will add this value to another. This is currently only implemented for strings and numbers. -// If either value is a string, the other type is coerced into a string and added to the other. -// -// This method will alter v and will attempt to reuse the []byte of v. If the []byte is too small, -// it will be expanded. -func (v *Value) Add(v2 Value) error { return nil } +// TODO(GODRIVER-2751): Implement AsFloat64OK. +// func (v Value) AsFloat64OK() (float64, bool) // Equal compaes v to v2 and returns true if they are equal. 
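The relocated package comment above describes the Append*/Read* conventions of bsoncore; a small self-contained sketch of those conventions (illustrative only, not part of the vendored code):

	package main

	import (
		"fmt"

		"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
	)

	func main() {
		// Append* functions grow the destination slice in place and return it.
		idx, doc := bsoncore.AppendDocumentStart(nil)
		doc = bsoncore.AppendStringElement(doc, "hello", "world")
		doc = bsoncore.AppendInt32Element(doc, "n", 42)
		doc, _ = bsoncore.AppendDocumentEnd(doc, idx)

		// Read* functions report "not enough bytes" with a bool instead of an error.
		if d, _, ok := bsoncore.ReadDocument(doc); ok {
			fmt.Println(d.Lookup("hello").StringValue()) // world
			fmt.Println(d.Lookup("n").Int32())           // 42
		}
	}

Keep the warning added in doc.go in mind: bsoncore is explicitly unstable and intended for building higher-level BSON libraries, not for direct application use.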
func (v Value) Equal(v2 Value) bool { diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore index f3355c852b..895c7664be 100644 --- a/vendor/go.opentelemetry.io/otel/.gitignore +++ b/vendor/go.opentelemetry.io/otel/.gitignore @@ -14,12 +14,9 @@ go.work.sum gen/ /example/dice/dice -/example/fib/fib -/example/fib/traces.txt -/example/jaeger/jaeger /example/namedtracer/namedtracer +/example/otel-collector/otel-collector /example/opencensus/opencensus /example/passthrough/passthrough /example/prometheus/prometheus /example/zipkin/zipkin -/example/otel-collector/otel-collector diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index 6e8eeec00f..a62511f382 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -12,8 +12,9 @@ linters: - depguard - errcheck - godot - - gofmt + - gofumpt - goimports + - gosec - gosimple - govet - ineffassign @@ -53,6 +54,20 @@ issues: text: "calls to (.+) only in main[(][)] or init[(][)] functions" linters: - revive + # It's okay to not run gosec in a test. + - path: _test\.go + linters: + - gosec + # Igonoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) + # as we commonly use it in tests and examples. + - text: "G404:" + linters: + - gosec + # Igonoring gosec G402: TLS MinVersion too low + # as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well. + - text: "G402: TLS MinVersion too low." + linters: + - gosec include: # revive exported should have comment or be unexported. - EXC0012 diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index 3e5c35b5dc..24874f856e 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -8,6 +8,85 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ## [Unreleased] +## [1.21.0/0.44.0] 2023-11-16 + +### Removed + +- Remove the deprecated `go.opentelemetry.io/otel/bridge/opencensus.NewTracer`. (#4706) +- Remove the deprecated `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` module. (#4707) +- Remove the deprecated `go.opentelemetry.io/otel/example/view` module. (#4708) +- Remove the deprecated `go.opentelemetry.io/otel/example/fib` module. (#4723) + +### Fixed + +- Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4719) +- Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4719) + +## [1.20.0/0.43.0] 2023-11-10 + +This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementors need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this. + +### Added + +- Add `go.opentelemetry.io/otel/bridge/opencensus.InstallTraceBridge`, which installs the OpenCensus trace bridge, and replaces `opencensus.NewTracer`. (#4567) +- Add scope version to trace and metric bridges in `go.opentelemetry.io/otel/bridge/opencensus`. (#4584) +- Add the `go.opentelemetry.io/otel/trace/embedded` package to be embedded in the exported trace API interfaces. 
(#4620) +- Add the `go.opentelemetry.io/otel/trace/noop` package as a default no-op implementation of the trace API. (#4620) +- Add context propagation in `go.opentelemetry.io/otel/example/dice`. (#4644) +- Add view configuration to `go.opentelemetry.io/otel/example/prometheus`. (#4649) +- Add `go.opentelemetry.io/otel/metric.WithExplicitBucketBoundaries`, which allows defining default explicit bucket boundaries when creating histogram instruments. (#4603) +- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4660) +- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4660) +- Add Summary, SummaryDataPoint, and QuantileValue to `go.opentelemetry.io/sdk/metric/metricdata`. (#4622) +- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` now supports exemplars from OpenCensus. (#4585) +- Add support for `WithExplicitBucketBoundaries` in `go.opentelemetry.io/otel/sdk/metric`. (#4605) +- Add support for Summary metrics in `go.opentelemetry.io/otel/bridge/opencensus`. (#4668) + +### Deprecated + +- Deprecate `go.opentelemetry.io/otel/bridge/opencensus.NewTracer` in favor of `opencensus.InstallTraceBridge`. (#4567) +- Deprecate `go.opentelemetry.io/otel/example/fib` package is in favor of `go.opentelemetry.io/otel/example/dice`. (#4618) +- Deprecate `go.opentelemetry.io/otel/trace.NewNoopTracerProvider`. + Use the added `NewTracerProvider` function in `go.opentelemetry.io/otel/trace/noop` instead. (#4620) +- Deprecate `go.opentelemetry.io/otel/example/view` package in favor of `go.opentelemetry.io/otel/example/prometheus`. (#4649) +- Deprecate `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4693) + +### Changed + +- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` returns a `*MetricProducer` struct instead of the metric.Producer interface. (#4583) +- The `TracerProvider` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.TracerProvider` type. + This extends the `TracerProvider` interface and is is a breaking change for any existing implementation. + Implementors need to update their implementations based on what they want the default behavior of the interface to be. + See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) +- The `Tracer` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Tracer` type. + This extends the `Tracer` interface and is is a breaking change for any existing implementation. + Implementors need to update their implementations based on what they want the default behavior of the interface to be. + See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) +- The `Span` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Span` type. + This extends the `Span` interface and is is a breaking change for any existing implementation. + Implementors need to update their implementations based on what they want the default behavior of the interface to be. + See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. 
(#4620) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` does no longer depend on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` does no longer depend on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660) +- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4670) +- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4670) +- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4669) +- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4669) +- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4679) +- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4679) + +### Fixed + +- Fix improper parsing of characters such us `+`, `/` by `Parse` in `go.opentelemetry.io/otel/baggage` as they were rendered as a whitespace. (#4667) +- Fix improper parsing of characters such us `+`, `/` passed via `OTEL_RESOURCE_ATTRIBUTES` in `go.opentelemetry.io/otel/sdk/resource` as they were rendered as a whitespace. (#4699) +- Fix improper parsing of characters such us `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` as they were rendered as a whitespace. (#4699) +- Fix improper parsing of characters such us `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` as they were rendered as a whitespace. (#4699) +- Fix improper parsing of characters such us `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlptracegrpc` as they were rendered as a whitespace. (#4699) +- Fix improper parsing of characters such us `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlptracehttp` as they were rendered as a whitespace. (#4699) +- In `go.opentelemetry.op/otel/exporters/prometheus`, the exporter no longer `Collect`s metrics after `Shutdown` is invoked. (#4648) +- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4695) +- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4695) + ## [1.19.0/0.42.0/0.0.7] 2023-09-28 This release contains the first stable release of the OpenTelemetry Go [metric SDK]. @@ -2656,7 +2735,9 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. 
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.19.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.21.0...HEAD +[1.21.0/0.44.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.21.0 +[1.20.0/0.43.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.20.0 [1.19.0/0.42.0/0.0.7]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0 [1.19.0-rc.1/0.42.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0-rc.1 [1.18.0/0.41.0/0.0.6]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.18.0 @@ -2731,7 +2812,7 @@ It contains api and sdk for trace and meter. [Go 1.20]: https://go.dev/doc/go1.20 [Go 1.19]: https://go.dev/doc/go1.19 [Go 1.18]: https://go.dev/doc/go1.18 -[Go 1.19]: https://go.dev/doc/go1.19 [metric API]:https://pkg.go.dev/go.opentelemetry.io/otel/metric [metric SDK]:https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric +[trace API]:https://pkg.go.dev/go.opentelemetry.io/otel/trace diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index a00dbca7b0..850606ae69 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -90,6 +90,10 @@ git push Open a pull request against the main `opentelemetry-go` repo. Be sure to add the pull request ID to the entry you added to `CHANGELOG.md`. +Avoid rebasing and force-pushing to your branch to facilitate reviewing the pull request. +Rewriting Git history makes it difficult to keep track of iterations during code review. +All pull requests are squashed to a single commit upon merge to `main`. + ### How to Receive Comments * If the PR is not ready for review, please put `[WIP]` in the title, diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index 5c311706b0..35fc189961 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -77,6 +77,9 @@ $(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl GORELEASE = $(TOOLS)/gorelease $(GORELEASE): PACKAGE=golang.org/x/exp/cmd/gorelease +GOVULNCHECK = $(TOOLS)/govulncheck +$(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck + .PHONY: tools tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) @@ -189,6 +192,18 @@ test-coverage: | $(GOCOVMERGE) done; \ $(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt +# Adding a directory will include all benchmarks in that direcotry if a filter is not specified. +BENCHMARK_TARGETS := sdk/trace +.PHONY: benchmark +benchmark: $(BENCHMARK_TARGETS:%=benchmark/%) +BENCHMARK_FILTER = . +# You can override the filter for a particular directory by adding a rule here. +benchmark/sdk/trace: BENCHMARK_FILTER = SpanWithAttributes_8/AlwaysSample +benchmark/%: + @echo "$(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(BENCHMARK_FILTER) $*..." 
\ + && cd $* \ + $(foreach filter, $(BENCHMARK_FILTER), && $(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(filter)) + .PHONY: golangci-lint golangci-lint-fix golangci-lint-fix: ARGS=--fix golangci-lint-fix: golangci-lint @@ -216,7 +231,7 @@ go-mod-tidy/%: | crosslink lint-modules: go-mod-tidy .PHONY: lint -lint: misspell lint-modules golangci-lint +lint: misspell lint-modules golangci-lint govulncheck .PHONY: vanity-import-check vanity-import-check: | $(PORTO) @@ -226,6 +241,14 @@ vanity-import-check: | $(PORTO) misspell: | $(MISSPELL) @$(MISSPELL) -w $(ALL_DOCS) +.PHONY: govulncheck +govulncheck: $(OTEL_GO_MOD_DIRS:%=govulncheck/%) +govulncheck/%: DIR=$* +govulncheck/%: | $(GOVULNCHECK) + @echo "govulncheck ./... in $(DIR)" \ + && cd $(DIR) \ + && $(GOVULNCHECK) ./... + .PHONY: codespell codespell: | $(CODESPELL) @$(DOCKERPY) $(CODESPELL) @@ -289,3 +312,7 @@ COMMIT ?= "HEAD" add-tags: | $(MULTIMOD) @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) $(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} + +.PHONY: lint-markdown +lint-markdown: + docker run -v "$(CURDIR):$(WORKDIR)" docker://avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index 634326ef83..2c5b0cc28a 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -11,16 +11,13 @@ It provides a set of APIs to directly measure performance and behavior of your s ## Project Status -| Signal | Status | Project | -|---------|------------|-----------------------| -| Traces | Stable | N/A | -| Metrics | Mixed [1] | [Go: Metric SDK (GA)] | -| Logs | Frozen [2] | N/A | +| Signal | Status | +|---------|------------| +| Traces | Stable | +| Metrics | Stable | +| Logs | Design [1] | -[Go: Metric SDK (GA)]: https://github.com/orgs/open-telemetry/projects/34 - -- [1]: [Metrics API](https://pkg.go.dev/go.opentelemetry.io/otel/metric) is Stable. [Metrics SDK](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric) is Beta. -- [2]: The Logs signal development is halted for this project while we stabilize the Metrics SDK. +- [1]: Currently the logs signal development is in a design phase ([#4696](https://github.com/open-telemetry/opentelemetry-go/issues/4696)). No Logs Pull Requests are currently being accepted. Progress and status specific to this repository is tracked in our diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go index 9e6b3b7b52..84532cb1da 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -254,7 +254,7 @@ func NewMember(key, value string, props ...Property) (Member, error) { if err := m.validate(); err != nil { return newInvalidMember(), err } - decodedValue, err := url.QueryUnescape(value) + decodedValue, err := url.PathUnescape(value) if err != nil { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) } @@ -301,7 +301,7 @@ func parseMember(member string) (Member, error) { // when converting the header into a data structure." 
key = strings.TrimSpace(k) var err error - value, err = url.QueryUnescape(strings.TrimSpace(v)) + value, err = url.PathUnescape(strings.TrimSpace(v)) if err != nil { return newInvalidMember(), fmt.Errorf("%w: %q", err, value) } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go index a33eded872..ebb13c2067 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go @@ -34,11 +34,13 @@ type afCounter struct { name string opts []metric.Float64ObservableCounterOption - delegate atomic.Value //metric.Float64ObservableCounter + delegate atomic.Value // metric.Float64ObservableCounter } -var _ unwrapper = (*afCounter)(nil) -var _ metric.Float64ObservableCounter = (*afCounter)(nil) +var ( + _ unwrapper = (*afCounter)(nil) + _ metric.Float64ObservableCounter = (*afCounter)(nil) +) func (i *afCounter) setDelegate(m metric.Meter) { ctr, err := m.Float64ObservableCounter(i.name, i.opts...) @@ -63,11 +65,13 @@ type afUpDownCounter struct { name string opts []metric.Float64ObservableUpDownCounterOption - delegate atomic.Value //metric.Float64ObservableUpDownCounter + delegate atomic.Value // metric.Float64ObservableUpDownCounter } -var _ unwrapper = (*afUpDownCounter)(nil) -var _ metric.Float64ObservableUpDownCounter = (*afUpDownCounter)(nil) +var ( + _ unwrapper = (*afUpDownCounter)(nil) + _ metric.Float64ObservableUpDownCounter = (*afUpDownCounter)(nil) +) func (i *afUpDownCounter) setDelegate(m metric.Meter) { ctr, err := m.Float64ObservableUpDownCounter(i.name, i.opts...) @@ -92,11 +96,13 @@ type afGauge struct { name string opts []metric.Float64ObservableGaugeOption - delegate atomic.Value //metric.Float64ObservableGauge + delegate atomic.Value // metric.Float64ObservableGauge } -var _ unwrapper = (*afGauge)(nil) -var _ metric.Float64ObservableGauge = (*afGauge)(nil) +var ( + _ unwrapper = (*afGauge)(nil) + _ metric.Float64ObservableGauge = (*afGauge)(nil) +) func (i *afGauge) setDelegate(m metric.Meter) { ctr, err := m.Float64ObservableGauge(i.name, i.opts...) @@ -121,11 +127,13 @@ type aiCounter struct { name string opts []metric.Int64ObservableCounterOption - delegate atomic.Value //metric.Int64ObservableCounter + delegate atomic.Value // metric.Int64ObservableCounter } -var _ unwrapper = (*aiCounter)(nil) -var _ metric.Int64ObservableCounter = (*aiCounter)(nil) +var ( + _ unwrapper = (*aiCounter)(nil) + _ metric.Int64ObservableCounter = (*aiCounter)(nil) +) func (i *aiCounter) setDelegate(m metric.Meter) { ctr, err := m.Int64ObservableCounter(i.name, i.opts...) @@ -150,11 +158,13 @@ type aiUpDownCounter struct { name string opts []metric.Int64ObservableUpDownCounterOption - delegate atomic.Value //metric.Int64ObservableUpDownCounter + delegate atomic.Value // metric.Int64ObservableUpDownCounter } -var _ unwrapper = (*aiUpDownCounter)(nil) -var _ metric.Int64ObservableUpDownCounter = (*aiUpDownCounter)(nil) +var ( + _ unwrapper = (*aiUpDownCounter)(nil) + _ metric.Int64ObservableUpDownCounter = (*aiUpDownCounter)(nil) +) func (i *aiUpDownCounter) setDelegate(m metric.Meter) { ctr, err := m.Int64ObservableUpDownCounter(i.name, i.opts...) 
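Following the switch from url.QueryUnescape to url.PathUnescape in the baggage package above, a literal '+' in a member value now survives decoding instead of being turned into a space; this is the parsing fix called out in the changelog. A minimal sketch using only the public baggage API (illustrative, not from the diff):

	package main

	import (
		"fmt"

		"go.opentelemetry.io/otel/baggage"
	)

	func main() {
		// "%2F" is still percent-decoded, but "+" is kept as-is.
		m, err := baggage.NewMember("token", "a+b%2Fc")
		if err != nil {
			panic(err)
		}
		fmt.Println(m.Value()) // a+b/c
	}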
@@ -179,11 +189,13 @@ type aiGauge struct { name string opts []metric.Int64ObservableGaugeOption - delegate atomic.Value //metric.Int64ObservableGauge + delegate atomic.Value // metric.Int64ObservableGauge } -var _ unwrapper = (*aiGauge)(nil) -var _ metric.Int64ObservableGauge = (*aiGauge)(nil) +var ( + _ unwrapper = (*aiGauge)(nil) + _ metric.Int64ObservableGauge = (*aiGauge)(nil) +) func (i *aiGauge) setDelegate(m metric.Meter) { ctr, err := m.Int64ObservableGauge(i.name, i.opts...) @@ -208,7 +220,7 @@ type sfCounter struct { name string opts []metric.Float64CounterOption - delegate atomic.Value //metric.Float64Counter + delegate atomic.Value // metric.Float64Counter } var _ metric.Float64Counter = (*sfCounter)(nil) @@ -234,7 +246,7 @@ type sfUpDownCounter struct { name string opts []metric.Float64UpDownCounterOption - delegate atomic.Value //metric.Float64UpDownCounter + delegate atomic.Value // metric.Float64UpDownCounter } var _ metric.Float64UpDownCounter = (*sfUpDownCounter)(nil) @@ -260,7 +272,7 @@ type sfHistogram struct { name string opts []metric.Float64HistogramOption - delegate atomic.Value //metric.Float64Histogram + delegate atomic.Value // metric.Float64Histogram } var _ metric.Float64Histogram = (*sfHistogram)(nil) @@ -286,7 +298,7 @@ type siCounter struct { name string opts []metric.Int64CounterOption - delegate atomic.Value //metric.Int64Counter + delegate atomic.Value // metric.Int64Counter } var _ metric.Int64Counter = (*siCounter)(nil) @@ -312,7 +324,7 @@ type siUpDownCounter struct { name string opts []metric.Int64UpDownCounterOption - delegate atomic.Value //metric.Int64UpDownCounter + delegate atomic.Value // metric.Int64UpDownCounter } var _ metric.Int64UpDownCounter = (*siUpDownCounter)(nil) @@ -338,7 +350,7 @@ type siHistogram struct { name string opts []metric.Int64HistogramOption - delegate atomic.Value //metric.Int64Histogram + delegate atomic.Value // metric.Int64Histogram } var _ metric.Int64Histogram = (*siHistogram)(nil) diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go index 5f008d0982..3f61ec12a3 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go @@ -39,6 +39,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/embedded" ) // tracerProvider is a placeholder for a configured SDK TracerProvider. @@ -46,6 +47,8 @@ import ( // All TracerProvider functionality is forwarded to a delegate once // configured. type tracerProvider struct { + embedded.TracerProvider + mtx sync.Mutex tracers map[il]*tracer delegate trace.TracerProvider @@ -119,6 +122,8 @@ type il struct { // All Tracer functionality is forwarded to a delegate once configured. // Otherwise, all functionality is forwarded to a NoopTracer. type tracer struct { + embedded.Tracer + name string opts []trace.TracerOption provider *tracerProvider @@ -156,6 +161,8 @@ func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStart // SpanContext. It performs no operations other than to return the wrapped // SpanContext. 
type nonRecordingSpan struct { + embedded.Span + sc trace.SpanContext tracer *tracer } diff --git a/vendor/go.opentelemetry.io/otel/metric/doc.go b/vendor/go.opentelemetry.io/otel/metric/doc.go index ae24e448d9..54716e13b3 100644 --- a/vendor/go.opentelemetry.io/otel/metric/doc.go +++ b/vendor/go.opentelemetry.io/otel/metric/doc.go @@ -149,7 +149,7 @@ of [go.opentelemetry.io/otel/metric]. Finally, an author can embed another implementation in theirs. The embedded implementation will be used for methods not defined by the author. For example, -an author who want to default to silently dropping the call can use +an author who wants to default to silently dropping the call can use [go.opentelemetry.io/otel/metric/noop]: import "go.opentelemetry.io/otel/metric/noop" diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go index cdca00058c..be89cd5334 100644 --- a/vendor/go.opentelemetry.io/otel/metric/instrument.go +++ b/vendor/go.opentelemetry.io/otel/metric/instrument.go @@ -39,6 +39,12 @@ type InstrumentOption interface { Float64ObservableGaugeOption } +// HistogramOption applies options to histogram instruments. +type HistogramOption interface { + Int64HistogramOption + Float64HistogramOption +} + type descOpt string func (o descOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig { @@ -171,6 +177,23 @@ func (o unitOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64Ob // The unit u should be defined using the appropriate [UCUM](https://ucum.org) case-sensitive code. func WithUnit(u string) InstrumentOption { return unitOpt(u) } +// WithExplicitBucketBoundaries sets the instrument explicit bucket boundaries. +// +// This option is considered "advisory", and may be ignored by API implementations. +func WithExplicitBucketBoundaries(bounds ...float64) HistogramOption { return bucketOpt(bounds) } + +type bucketOpt []float64 + +func (o bucketOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig { + c.explicitBucketBoundaries = o + return c +} + +func (o bucketOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig { + c.explicitBucketBoundaries = o + return c +} + // AddOption applies options to an addition measurement. See // [MeasurementOption] for other options that can be used as an AddOption. type AddOption interface { diff --git a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go index f0b063721d..0a4825ae6a 100644 --- a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go +++ b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go @@ -147,8 +147,9 @@ type Float64Histogram interface { // Float64HistogramConfig contains options for synchronous counter instruments // that record int64 values. type Float64HistogramConfig struct { - description string - unit string + description string + unit string + explicitBucketBoundaries []float64 } // NewFloat64HistogramConfig returns a new [Float64HistogramConfig] with all @@ -171,6 +172,11 @@ func (c Float64HistogramConfig) Unit() string { return c.unit } +// ExplicitBucketBoundaries returns the configured explicit bucket boundaries. +func (c Float64HistogramConfig) ExplicitBucketBoundaries() []float64 { + return c.explicitBucketBoundaries +} + // Float64HistogramOption applies options to a [Float64HistogramConfig]. See // [InstrumentOption] for other options that can be used as a // Float64HistogramOption. 
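The new WithExplicitBucketBoundaries option shown above lets instrumentation suggest default histogram buckets at instrument creation time. A short sketch of how it might be used; the meter name and boundary values here are our own, and the option is advisory, so an SDK may ignore it:

	package main

	import (
		"context"

		"go.opentelemetry.io/otel"
		"go.opentelemetry.io/otel/metric"
	)

	func recordLatency(ctx context.Context, seconds float64) error {
		meter := otel.Meter("example/latency")
		hist, err := meter.Float64Histogram(
			"request.duration",
			metric.WithUnit("s"),
			// Advisory default buckets; ignored by SDKs that do not support them.
			metric.WithExplicitBucketBoundaries(0.005, 0.01, 0.05, 0.1, 0.5, 1, 5),
		)
		if err != nil {
			return err
		}
		hist.Record(ctx, seconds)
		return nil
	}

	func main() {
		_ = recordLatency(context.Background(), 0.042)
	}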
diff --git a/vendor/go.opentelemetry.io/otel/metric/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/syncint64.go index 6f508eb66d..56667d32fc 100644 --- a/vendor/go.opentelemetry.io/otel/metric/syncint64.go +++ b/vendor/go.opentelemetry.io/otel/metric/syncint64.go @@ -147,8 +147,9 @@ type Int64Histogram interface { // Int64HistogramConfig contains options for synchronous counter instruments // that record int64 values. type Int64HistogramConfig struct { - description string - unit string + description string + unit string + explicitBucketBoundaries []float64 } // NewInt64HistogramConfig returns a new [Int64HistogramConfig] with all opts @@ -171,6 +172,11 @@ func (c Int64HistogramConfig) Unit() string { return c.unit } +// ExplicitBucketBoundaries returns the configured explicit bucket boundaries. +func (c Int64HistogramConfig) ExplicitBucketBoundaries() []float64 { + return c.explicitBucketBoundaries +} + // Int64HistogramOption applies options to a [Int64HistogramConfig]. See // [InstrumentOption] for other options that can be used as an // Int64HistogramOption. diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go index 902692da08..75a8f3435a 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go +++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go @@ -40,8 +40,10 @@ const ( // their proprietary information. type TraceContext struct{} -var _ TextMapPropagator = TraceContext{} -var traceCtxRegExp = regexp.MustCompile("^(?P[0-9a-f]{2})-(?P[a-f0-9]{32})-(?P[a-f0-9]{16})-(?P[a-f0-9]{2})(?:-.*)?$") +var ( + _ TextMapPropagator = TraceContext{} + traceCtxRegExp = regexp.MustCompile("^(?P[0-9a-f]{2})-(?P[a-f0-9]{32})-(?P[a-f0-9]{16})-(?P[a-f0-9]{2})(?:-.*)?$") +) // Inject set tracecontext from the Context into the carrier. func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) { diff --git a/vendor/go.opentelemetry.io/otel/requirements.txt b/vendor/go.opentelemetry.io/otel/requirements.txt index ddff454685..e0a43e1384 100644 --- a/vendor/go.opentelemetry.io/otel/requirements.txt +++ b/vendor/go.opentelemetry.io/otel/requirements.txt @@ -1 +1 @@ -codespell==2.2.5 +codespell==2.2.6 diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go index cb3efbb9ad..3aadc66cf7 100644 --- a/vendor/go.opentelemetry.io/otel/trace/config.go +++ b/vendor/go.opentelemetry.io/otel/trace/config.go @@ -268,6 +268,7 @@ func (o stackTraceOption) applyEvent(c EventConfig) EventConfig { c.stackTrace = bool(o) return c } + func (o stackTraceOption) applySpan(c SpanConfig) SpanConfig { c.stackTrace = bool(o) return c diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go index ab0346f966..440f3d7565 100644 --- a/vendor/go.opentelemetry.io/otel/trace/doc.go +++ b/vendor/go.opentelemetry.io/otel/trace/doc.go @@ -62,5 +62,69 @@ a default. defer span.End() // ... } + +# API Implementations + +This package does not conform to the standard Go versioning policy; all of its +interfaces may have methods added to them without a package major version bump. +This non-standard API evolution could surprise an uninformed implementation +author. They could unknowingly build their implementation in a way that would +result in a runtime panic for their users that update to the new API. + +The API is designed to help inform an instrumentation author about this +non-standard API evolution. 
It requires them to choose a default behavior for +unimplemented interface methods. There are three behavior choices they can +make: + + - Compilation failure + - Panic + - Default to another implementation + +All interfaces in this API embed a corresponding interface from +[go.opentelemetry.io/otel/trace/embedded]. If an author wants the default +behavior of their implementations to be a compilation failure, signaling to +their users they need to update to the latest version of that implementation, +they need to embed the corresponding interface from +[go.opentelemetry.io/otel/trace/embedded] in their implementation. For +example, + + import "go.opentelemetry.io/otel/trace/embedded" + + type TracerProvider struct { + embedded.TracerProvider + // ... + } + +If an author wants the default behavior of their implementations to panic, they +can embed the API interface directly. + + import "go.opentelemetry.io/otel/trace" + + type TracerProvider struct { + trace.TracerProvider + // ... + } + +This option is not recommended. It will lead to publishing packages that +contain runtime panics when users update to newer versions of +[go.opentelemetry.io/otel/trace], which may be done with a trasitive +dependency. + +Finally, an author can embed another implementation in theirs. The embedded +implementation will be used for methods not defined by the author. For example, +an author who wants to default to silently dropping the call can use +[go.opentelemetry.io/otel/trace/noop]: + + import "go.opentelemetry.io/otel/trace/noop" + + type TracerProvider struct { + noop.TracerProvider + // ... + } + +It is strongly recommended that authors only embed +[go.opentelemetry.io/otel/trace/noop] if they choose this default behavior. +That implementation is the only one OpenTelemetry authors can guarantee will +fully implement all the API interfaces when a user updates their API. */ package trace // import "go.opentelemetry.io/otel/trace" diff --git a/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go new file mode 100644 index 0000000000..898db5a754 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go @@ -0,0 +1,56 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package embedded provides interfaces embedded within the [OpenTelemetry +// trace API]. +// +// Implementers of the [OpenTelemetry trace API] can embed the relevant type +// from this package into their implementation directly. Doing so will result +// in a compilation error for users when the [OpenTelemetry trace API] is +// extended (which is something that can happen without a major version bump of +// the API package). +// +// [OpenTelemetry trace API]: https://pkg.go.dev/go.opentelemetry.io/otel/trace +package embedded // import "go.opentelemetry.io/otel/trace/embedded" + +// TracerProvider is embedded in +// [go.opentelemetry.io/otel/trace.TracerProvider]. 
+// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/trace.TracerProvider] if you want users to +// experience a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/trace.TracerProvider] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type TracerProvider interface{ tracerProvider() } + +// Tracer is embedded in [go.opentelemetry.io/otel/trace.Tracer]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/trace.Tracer] if you want users to experience a +// compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/trace.Tracer] interface +// is extended (which is something that can happen without a major version bump +// of the API package). +type Tracer interface{ tracer() } + +// Span is embedded in [go.opentelemetry.io/otel/trace.Span]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/trace.Span] if you want users to experience a +// compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/trace.Span] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Span interface{ span() } diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go index 7cf6c7f3ef..c125491cae 100644 --- a/vendor/go.opentelemetry.io/otel/trace/noop.go +++ b/vendor/go.opentelemetry.io/otel/trace/noop.go @@ -19,16 +19,20 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace/embedded" ) // NewNoopTracerProvider returns an implementation of TracerProvider that // performs no operations. The Tracer and Spans created from the returned // TracerProvider also perform no operations. +// +// Deprecated: Use [go.opentelemetry.io/otel/trace/noop.NewTracerProvider] +// instead. func NewNoopTracerProvider() TracerProvider { return noopTracerProvider{} } -type noopTracerProvider struct{} +type noopTracerProvider struct{ embedded.TracerProvider } var _ TracerProvider = noopTracerProvider{} @@ -38,7 +42,7 @@ func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer { } // noopTracer is an implementation of Tracer that performs no operations. -type noopTracer struct{} +type noopTracer struct{ embedded.Tracer } var _ Tracer = noopTracer{} @@ -54,7 +58,7 @@ func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption } // noopSpan is an implementation of Span that performs no operations. -type noopSpan struct{} +type noopSpan struct{ embedded.Span } var _ Span = noopSpan{} diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go index 4aa94f79f4..26a4b2260e 100644 --- a/vendor/go.opentelemetry.io/otel/trace/trace.go +++ b/vendor/go.opentelemetry.io/otel/trace/trace.go @@ -22,6 +22,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace/embedded" ) const ( @@ -48,8 +49,10 @@ func (e errorConst) Error() string { // nolint:revive // revive complains about stutter of `trace.TraceID`. 
type TraceID [16]byte -var nilTraceID TraceID -var _ json.Marshaler = nilTraceID +var ( + nilTraceID TraceID + _ json.Marshaler = nilTraceID +) // IsValid checks whether the trace TraceID is valid. A valid trace ID does // not consist of zeros only. @@ -71,8 +74,10 @@ func (t TraceID) String() string { // SpanID is a unique identity of a span in a trace. type SpanID [8]byte -var nilSpanID SpanID -var _ json.Marshaler = nilSpanID +var ( + nilSpanID SpanID + _ json.Marshaler = nilSpanID +) // IsValid checks whether the SpanID is valid. A valid SpanID does not consist // of zeros only. @@ -338,8 +343,15 @@ func (sc SpanContext) MarshalJSON() ([]byte, error) { // create a Span and it is then up to the operation the Span represents to // properly end the Span when the operation itself ends. // -// Warning: methods may be added to this interface in minor releases. +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. type Span interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Span + // End completes the Span. The Span is considered complete and ready to be // delivered through the rest of the telemetry pipeline after this method // is called. Therefore, updates to the Span are not allowed after this @@ -486,8 +498,15 @@ func (sk SpanKind) String() string { // Tracer is the creator of Spans. // -// Warning: methods may be added to this interface in minor releases. +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. type Tracer interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Tracer + // Start creates a span and a context.Context containing the newly-created span. // // If the context.Context provided in `ctx` contains a Span then the newly-created @@ -518,8 +537,15 @@ type Tracer interface { // at runtime from its users or it can simply use the globally registered one // (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). // -// Warning: methods may be added to this interface in minor releases. +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. type TracerProvider interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.TracerProvider + // Tracer returns a unique Tracer scoped to be used by instrumentation code // to trace computational workflows. The scope and identity of that // instrumentation code is uniquely defined by the name and options passed. 
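The deprecation of trace.NewNoopTracerProvider and the newly embedded interfaces above change how no-op and custom trace implementations are wired up. A brief sketch of the migration and of the embedding recommended by the "API Implementations" section added to trace/doc.go, assuming nothing beyond the APIs referenced in this diff:

	package main

	import (
		"go.opentelemetry.io/otel"
		"go.opentelemetry.io/otel/trace/noop"
	)

	// customProvider embeds the noop implementation, so any methods added to
	// the trace API in future minor releases fall back to no-ops instead of
	// failing compilation or panicking at runtime.
	type customProvider struct {
		noop.TracerProvider
	}

	func main() {
		// noop.NewTracerProvider replaces the deprecated trace.NewNoopTracerProvider.
		otel.SetTracerProvider(noop.NewTracerProvider())
		_ = customProvider{}
	}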
diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go index ca68a82e5f..d1e47ca2fa 100644 --- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go +++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go @@ -28,9 +28,9 @@ const ( // based on the W3C Trace Context specification, see // https://www.w3.org/TR/trace-context-1/#tracestate-header - noTenantKeyFormat = `[a-z][_0-9a-z\-\*\/]{0,255}` - withTenantKeyFormat = `[a-z0-9][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}` - valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]` + noTenantKeyFormat = `[a-z][_0-9a-z\-\*\/]*` + withTenantKeyFormat = `[a-z0-9][_0-9a-z\-\*\/]*@[a-z][_0-9a-z\-\*\/]*` + valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]*[\x21-\x2b\x2d-\x3c\x3e-\x7e]` errInvalidKey errorConst = "invalid tracestate key" errInvalidValue errorConst = "invalid tracestate value" @@ -40,9 +40,10 @@ const ( ) var ( - keyRe = regexp.MustCompile(`^((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))$`) - valueRe = regexp.MustCompile(`^(` + valueFormat + `)$`) - memberRe = regexp.MustCompile(`^\s*((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))=(` + valueFormat + `)\s*$`) + noTenantKeyRe = regexp.MustCompile(`^` + noTenantKeyFormat + `$`) + withTenantKeyRe = regexp.MustCompile(`^` + withTenantKeyFormat + `$`) + valueRe = regexp.MustCompile(`^` + valueFormat + `$`) + memberRe = regexp.MustCompile(`^\s*((?:` + noTenantKeyFormat + `)|(?:` + withTenantKeyFormat + `))=(` + valueFormat + `)\s*$`) ) type member struct { @@ -51,10 +52,19 @@ type member struct { } func newMember(key, value string) (member, error) { - if !keyRe.MatchString(key) { + if len(key) > 256 { return member{}, fmt.Errorf("%w: %s", errInvalidKey, key) } - if !valueRe.MatchString(value) { + if !noTenantKeyRe.MatchString(key) { + if !withTenantKeyRe.MatchString(key) { + return member{}, fmt.Errorf("%w: %s", errInvalidKey, key) + } + atIndex := strings.LastIndex(key, "@") + if atIndex > 241 || len(key)-1-atIndex > 14 { + return member{}, fmt.Errorf("%w: %s", errInvalidKey, key) + } + } + if len(value) > 256 || !valueRe.MatchString(value) { return member{}, fmt.Errorf("%w: %s", errInvalidValue, value) } return member{Key: key, Value: value}, nil @@ -62,14 +72,14 @@ func newMember(key, value string) (member, error) { func parseMember(m string) (member, error) { matches := memberRe.FindStringSubmatch(m) - if len(matches) != 5 { + if len(matches) != 3 { return member{}, fmt.Errorf("%w: %s", errInvalidMember, m) } - - return member{ - Key: matches[1], - Value: matches[4], - }, nil + result, e := newMember(matches[1], matches[2]) + if e != nil { + return member{}, fmt.Errorf("%w: %s", errInvalidMember, m) + } + return result, nil } // String encodes member into a string compliant with the W3C Trace Context diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index ad64e19967..e2f743585d 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -16,5 +16,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. 
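The tracestate rewrite above keeps the W3C limits on member keys and values but enforces them with explicit length checks and the multi-tenant key format rather than bounded regular expressions. A small sketch of the observable behavior through the public TraceState API (illustrative, not from the diff):

	package main

	import (
		"fmt"
		"strings"

		"go.opentelemetry.io/otel/trace"
	)

	func main() {
		ts, err := trace.ParseTraceState("vendorname=opaque-value")
		fmt.Println(ts.Get("vendorname"), err) // opaque-value <nil>

		// A key longer than 256 characters is rejected by the new length check.
		_, err = ts.Insert(strings.Repeat("k", 257), "v")
		fmt.Println(err != nil) // true
	}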
func Version() string { - return "1.19.0" + return "1.21.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index 7d21276924..3c153c9d6f 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -14,13 +14,12 @@ module-sets: stable-v1: - version: v1.19.0 + version: v1.21.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opentracing - go.opentelemetry.io/otel/bridge/opentracing/test - go.opentelemetry.io/otel/example/dice - - go.opentelemetry.io/otel/example/fib - go.opentelemetry.io/otel/example/namedtracer - go.opentelemetry.io/otel/example/otel-collector - go.opentelemetry.io/otel/example/passthrough @@ -35,14 +34,12 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.42.0 + version: v0.44.0 modules: - go.opentelemetry.io/otel/bridge/opencensus - go.opentelemetry.io/otel/bridge/opencensus/test - go.opentelemetry.io/otel/example/opencensus - go.opentelemetry.io/otel/example/prometheus - - go.opentelemetry.io/otel/example/view - - go.opentelemetry.io/otel/exporters/otlp/otlpmetric - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp - go.opentelemetry.io/otel/exporters/prometheus diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1.go index 6fc2838a3f..2492f796af 100644 --- a/vendor/golang.org/x/crypto/cryptobyte/asn1.go +++ b/vendor/golang.org/x/crypto/cryptobyte/asn1.go @@ -733,13 +733,14 @@ func (s *String) ReadOptionalASN1OctetString(out *[]byte, outPresent *bool, tag return true } -// ReadOptionalASN1Boolean sets *out to the value of the next ASN.1 BOOLEAN or, -// if the next bytes are not an ASN.1 BOOLEAN, to the value of defaultValue. -// It reports whether the operation was successful. -func (s *String) ReadOptionalASN1Boolean(out *bool, defaultValue bool) bool { +// ReadOptionalASN1Boolean attempts to read an optional ASN.1 BOOLEAN +// explicitly tagged with tag into out and advances. If no element with a +// matching tag is present, it sets "out" to defaultValue instead. It reports +// whether the read was successful. +func (s *String) ReadOptionalASN1Boolean(out *bool, tag asn1.Tag, defaultValue bool) bool { var present bool var child String - if !s.ReadOptionalASN1(&child, &present, asn1.BOOLEAN) { + if !s.ReadOptionalASN1(&child, &present, tag) { return false } @@ -748,7 +749,7 @@ func (s *String) ReadOptionalASN1Boolean(out *bool, defaultValue bool) bool { return true } - return s.ReadASN1Boolean(out) + return child.ReadASN1Boolean(out) } func (s *String) readASN1(out *String, outTag *asn1.Tag, skipHeader bool) bool { diff --git a/vendor/golang.org/x/exp/slog/handler.go b/vendor/golang.org/x/exp/slog/handler.go index 74f88738c9..bd635cb818 100644 --- a/vendor/golang.org/x/exp/slog/handler.go +++ b/vendor/golang.org/x/exp/slog/handler.go @@ -8,6 +8,7 @@ import ( "context" "fmt" "io" + "reflect" "strconv" "sync" "time" @@ -504,6 +505,23 @@ func (s *handleState) appendString(str string) { } func (s *handleState) appendValue(v Value) { + defer func() { + if r := recover(); r != nil { + // If it panics with a nil pointer, the most likely cases are + // an encoding.TextMarshaler or error fails to guard against nil, + // in which case "" seems to be the feasible choice. + // + // Adapted from the code in fmt/print.go. 
+ if v := reflect.ValueOf(v.any); v.Kind() == reflect.Pointer && v.IsNil() { + s.appendString("") + return + } + + // Otherwise just print the original panic message. + s.appendString(fmt.Sprintf("!PANIC: %v", r)) + } + }() + var err error if s.h.json { err = appendJSONValue(s, v) diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go deleted file mode 100644 index cf66309c4a..0000000000 --- a/vendor/golang.org/x/net/context/context.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package context defines the Context type, which carries deadlines, -// cancelation signals, and other request-scoped values across API boundaries -// and between processes. -// As of Go 1.7 this package is available in the standard library under the -// name context. https://golang.org/pkg/context. -// -// Incoming requests to a server should create a Context, and outgoing calls to -// servers should accept a Context. The chain of function calls between must -// propagate the Context, optionally replacing it with a modified copy created -// using WithDeadline, WithTimeout, WithCancel, or WithValue. -// -// Programs that use Contexts should follow these rules to keep interfaces -// consistent across packages and enable static analysis tools to check context -// propagation: -// -// Do not store Contexts inside a struct type; instead, pass a Context -// explicitly to each function that needs it. The Context should be the first -// parameter, typically named ctx: -// -// func DoSomething(ctx context.Context, arg Arg) error { -// // ... use ctx ... -// } -// -// Do not pass a nil Context, even if a function permits it. Pass context.TODO -// if you are unsure about which Context to use. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -// -// The same Context may be passed to functions running in different goroutines; -// Contexts are safe for simultaneous use by multiple goroutines. -// -// See http://blog.golang.org/context for example code for a server that uses -// Contexts. -package context // import "golang.org/x/net/context" - -// Background returns a non-nil, empty Context. It is never canceled, has no -// values, and has no deadline. It is typically used by the main function, -// initialization, and tests, and as the top-level Context for incoming -// requests. -func Background() Context { - return background -} - -// TODO returns a non-nil, empty Context. Code should use context.TODO when -// it's unclear which Context to use or it is not yet available (because the -// surrounding function has not yet been extended to accept a Context -// parameter). TODO is recognized by static analysis tools that determine -// whether Contexts are propagated correctly in a program. -func TODO() Context { - return todo -} diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go deleted file mode 100644 index 2cb9c408f2..0000000000 --- a/vendor/golang.org/x/net/context/go17.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
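Relatedly, a small sketch of the failure mode that the recover added to x/exp/slog's appendValue above guards against: a typed nil pointer that satisfies error panics when the JSON handler calls Error(). The apiError type is hypothetical and used only for illustration:

    package main

    import (
        "os"

        "golang.org/x/exp/slog"
    )

    // apiError is a hypothetical error type whose Error method is not nil-safe.
    type apiError struct{ msg string }

    func (e *apiError) Error() string { return e.msg }

    func main() {
        logger := slog.New(slog.NewJSONHandler(os.Stderr, nil))

        var err *apiError // typed nil that still satisfies the error interface
        // The JSON handler calls err.Error(); previously this panicked inside
        // the handler, now the recover emits an empty string for the attribute.
        logger.Info("request failed", "err", err)
    }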
- -//go:build go1.7 -// +build go1.7 - -package context - -import ( - "context" // standard library's context, as of Go 1.7 - "time" -) - -var ( - todo = context.TODO() - background = context.Background() -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = context.Canceled - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = context.DeadlineExceeded - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - ctx, f := context.WithCancel(parent) - return ctx, f -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - ctx, f := context.WithDeadline(parent, deadline) - return ctx, f -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return context.WithValue(parent, key, val) -} diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go deleted file mode 100644 index 64d31ecc3e..0000000000 --- a/vendor/golang.org/x/net/context/go19.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.9 -// +build go1.9 - -package context - -import "context" // standard library's context, as of Go 1.7 - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context = context.Context - -// A CancelFunc tells an operation to abandon its work. 
-// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc = context.CancelFunc diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go deleted file mode 100644 index 7b6b685114..0000000000 --- a/vendor/golang.org/x/net/context/pre_go17.go +++ /dev/null @@ -1,301 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.7 -// +build !go1.7 - -package context - -import ( - "errors" - "fmt" - "sync" - "time" -) - -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. -type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case background: - return "context.Background" - case todo: - return "context.TODO" - } - return "unknown empty Context" -} - -var ( - background = new(emptyCtx) - todo = new(emptyCtx) -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = errors.New("context canceled") - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = errors.New("context deadline exceeded") - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - c := newCancelCtx(parent) - propagateCancel(parent, c) - return c, func() { c.cancel(true, Canceled) } -} - -// newCancelCtx returns an initialized cancelCtx. -func newCancelCtx(parent Context) *cancelCtx { - return &cancelCtx{ - Context: parent, - done: make(chan struct{}), - } -} - -// propagateCancel arranges for child to be canceled when parent is. -func propagateCancel(parent Context, child canceler) { - if parent.Done() == nil { - return // parent is never canceled - } - if p, ok := parentCancelCtx(parent); ok { - p.mu.Lock() - if p.err != nil { - // parent has already been canceled - child.cancel(false, p.err) - } else { - if p.children == nil { - p.children = make(map[canceler]bool) - } - p.children[child] = true - } - p.mu.Unlock() - } else { - go func() { - select { - case <-parent.Done(): - child.cancel(false, parent.Err()) - case <-child.Done(): - } - }() - } -} - -// parentCancelCtx follows a chain of parent references until it finds a -// *cancelCtx. This function understands how each of the concrete types in this -// package represents its parent. -func parentCancelCtx(parent Context) (*cancelCtx, bool) { - for { - switch c := parent.(type) { - case *cancelCtx: - return c, true - case *timerCtx: - return c.cancelCtx, true - case *valueCtx: - parent = c.Context - default: - return nil, false - } - } -} - -// removeChild removes a context from its parent. 
-func removeChild(parent Context, child canceler) { - p, ok := parentCancelCtx(parent) - if !ok { - return - } - p.mu.Lock() - if p.children != nil { - delete(p.children, child) - } - p.mu.Unlock() -} - -// A canceler is a context type that can be canceled directly. The -// implementations are *cancelCtx and *timerCtx. -type canceler interface { - cancel(removeFromParent bool, err error) - Done() <-chan struct{} -} - -// A cancelCtx can be canceled. When canceled, it also cancels any children -// that implement canceler. -type cancelCtx struct { - Context - - done chan struct{} // closed by the first cancel call. - - mu sync.Mutex - children map[canceler]bool // set to nil by the first cancel call - err error // set to non-nil by the first cancel call -} - -func (c *cancelCtx) Done() <-chan struct{} { - return c.done -} - -func (c *cancelCtx) Err() error { - c.mu.Lock() - defer c.mu.Unlock() - return c.err -} - -func (c *cancelCtx) String() string { - return fmt.Sprintf("%v.WithCancel", c.Context) -} - -// cancel closes c.done, cancels each of c's children, and, if -// removeFromParent is true, removes c from its parent's children. -func (c *cancelCtx) cancel(removeFromParent bool, err error) { - if err == nil { - panic("context: internal error: missing cancel error") - } - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return // already canceled - } - c.err = err - close(c.done) - for child := range c.children { - // NOTE: acquiring the child's lock while holding parent's lock. - child.cancel(false, err) - } - c.children = nil - c.mu.Unlock() - - if removeFromParent { - removeChild(c.Context, c) - } -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { - // The current deadline is already sooner than the new one. - return WithCancel(parent) - } - c := &timerCtx{ - cancelCtx: newCancelCtx(parent), - deadline: deadline, - } - propagateCancel(parent, c) - d := deadline.Sub(time.Now()) - if d <= 0 { - c.cancel(true, DeadlineExceeded) // deadline has already passed - return c, func() { c.cancel(true, Canceled) } - } - c.mu.Lock() - defer c.mu.Unlock() - if c.err == nil { - c.timer = time.AfterFunc(d, func() { - c.cancel(true, DeadlineExceeded) - }) - } - return c, func() { c.cancel(true, Canceled) } -} - -// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to -// implement Done and Err. It implements cancel by stopping its timer then -// delegating to cancelCtx.cancel. -type timerCtx struct { - *cancelCtx - timer *time.Timer // Under cancelCtx.mu. 
- - deadline time.Time -} - -func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { - return c.deadline, true -} - -func (c *timerCtx) String() string { - return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) -} - -func (c *timerCtx) cancel(removeFromParent bool, err error) { - c.cancelCtx.cancel(false, err) - if removeFromParent { - // Remove this timerCtx from its parent cancelCtx's children. - removeChild(c.cancelCtx.Context, c) - } - c.mu.Lock() - if c.timer != nil { - c.timer.Stop() - c.timer = nil - } - c.mu.Unlock() -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return &valueCtx{parent, key, val} -} - -// A valueCtx carries a key-value pair. It implements Value for that key and -// delegates all other calls to the embedded Context. -type valueCtx struct { - Context - key, val interface{} -} - -func (c *valueCtx) String() string { - return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) -} - -func (c *valueCtx) Value(key interface{}) interface{} { - if c.key == key { - return c.val - } - return c.Context.Value(key) -} diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go deleted file mode 100644 index 1f9715341f..0000000000 --- a/vendor/golang.org/x/net/context/pre_go19.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.9 -// +build !go1.9 - -package context - -import "time" - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - // - // WithCancel arranges for Done to be closed when cancel is called; - // WithDeadline arranges for Done to be closed when the deadline - // expires; WithTimeout arranges for Done to be closed when the timeout - // elapses. 
- // - // Done is provided for use in select statements: - // - // // Stream generates values with DoSomething and sends them to out - // // until DoSomething returns an error or ctx.Done is closed. - // func Stream(ctx context.Context, out chan<- Value) error { - // for { - // v, err := DoSomething(ctx) - // if err != nil { - // return err - // } - // select { - // case <-ctx.Done(): - // return ctx.Err() - // case out <- v: - // } - // } - // } - // - // See http://blog.golang.org/pipelines for more examples of how to use - // a Done channel for cancelation. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - // - // A key identifies a specific value in a Context. Functions that wish - // to store values in Context typically allocate a key in a global - // variable then use that key as the argument to context.WithValue and - // Context.Value. A key can be any type that supports equality; - // packages should define keys as an unexported type to avoid - // collisions. - // - // Packages that define a Context key should provide type-safe accessors - // for the values stores using that key: - // - // // Package user defines a User type that's stored in Contexts. - // package user - // - // import "golang.org/x/net/context" - // - // // User is the type of value stored in the Contexts. - // type User struct {...} - // - // // key is an unexported type for keys defined in this package. - // // This prevents collisions with keys defined in other packages. - // type key int - // - // // userKey is the key for user.User values in Contexts. It is - // // unexported; clients use user.NewContext and user.FromContext - // // instead of using this key directly. - // var userKey key = 0 - // - // // NewContext returns a new Context that carries value u. - // func NewContext(ctx context.Context, u *User) context.Context { - // return context.WithValue(ctx, userKey, u) - // } - // - // // FromContext returns the User value stored in ctx, if any. - // func FromContext(ctx context.Context) (*User, bool) { - // u, ok := ctx.Value(userKey).(*User) - // return u, ok - // } - Value(key interface{}) interface{} -} - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc func() diff --git a/vendor/golang.org/x/net/html/atom/atom.go b/vendor/golang.org/x/net/html/atom/atom.go new file mode 100644 index 0000000000..cd0a8ac154 --- /dev/null +++ b/vendor/golang.org/x/net/html/atom/atom.go @@ -0,0 +1,78 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package atom provides integer codes (also known as atoms) for a fixed set of +// frequently occurring HTML strings: tag names and attribute keys such as "p" +// and "id". 
+// +// Sharing an atom's name between all elements with the same tag can result in +// fewer string allocations when tokenizing and parsing HTML. Integer +// comparisons are also generally faster than string comparisons. +// +// The value of an atom's particular code is not guaranteed to stay the same +// between versions of this package. Neither is any ordering guaranteed: +// whether atom.H1 < atom.H2 may also change. The codes are not guaranteed to +// be dense. The only guarantees are that e.g. looking up "div" will yield +// atom.Div, calling atom.Div.String will return "div", and atom.Div != 0. +package atom // import "golang.org/x/net/html/atom" + +// Atom is an integer code for a string. The zero value maps to "". +type Atom uint32 + +// String returns the atom's name. +func (a Atom) String() string { + start := uint32(a >> 8) + n := uint32(a & 0xff) + if start+n > uint32(len(atomText)) { + return "" + } + return atomText[start : start+n] +} + +func (a Atom) string() string { + return atomText[a>>8 : a>>8+a&0xff] +} + +// fnv computes the FNV hash with an arbitrary starting value h. +func fnv(h uint32, s []byte) uint32 { + for i := range s { + h ^= uint32(s[i]) + h *= 16777619 + } + return h +} + +func match(s string, t []byte) bool { + for i, c := range t { + if s[i] != c { + return false + } + } + return true +} + +// Lookup returns the atom whose name is s. It returns zero if there is no +// such atom. The lookup is case sensitive. +func Lookup(s []byte) Atom { + if len(s) == 0 || len(s) > maxAtomLen { + return 0 + } + h := fnv(hash0, s) + if a := table[h&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) { + return a + } + if a := table[(h>>16)&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) { + return a + } + return 0 +} + +// String returns a string whose contents are equal to s. In that sense, it is +// equivalent to string(s) but may be more efficient. +func String(s []byte) string { + if a := Lookup(s); a != 0 { + return a.String() + } + return string(s) +} diff --git a/vendor/golang.org/x/net/html/atom/table.go b/vendor/golang.org/x/net/html/atom/table.go new file mode 100644 index 0000000000..2a938864cb --- /dev/null +++ b/vendor/golang.org/x/net/html/atom/table.go @@ -0,0 +1,783 @@ +// Code generated by go generate gen.go; DO NOT EDIT. 
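For reference, a minimal sketch of the golang.org/x/net/html/atom API added above; Lookup is case sensitive and unknown names map to the zero Atom:

    package main

    import (
        "fmt"

        "golang.org/x/net/html/atom"
    )

    func main() {
        fmt.Println(atom.Lookup([]byte("div")) == atom.Div) // true
        fmt.Println(atom.Div.String())                      // "div"
        fmt.Println(atom.Lookup([]byte("DIV")) == 0)        // true: lookup is case sensitive
    }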
+ +//go:generate go run gen.go + +package atom + +const ( + A Atom = 0x1 + Abbr Atom = 0x4 + Accept Atom = 0x1a06 + AcceptCharset Atom = 0x1a0e + Accesskey Atom = 0x2c09 + Acronym Atom = 0xaa07 + Action Atom = 0x27206 + Address Atom = 0x6f307 + Align Atom = 0xb105 + Allowfullscreen Atom = 0x2080f + Allowpaymentrequest Atom = 0xc113 + Allowusermedia Atom = 0xdd0e + Alt Atom = 0xf303 + Annotation Atom = 0x1c90a + AnnotationXml Atom = 0x1c90e + Applet Atom = 0x31906 + Area Atom = 0x35604 + Article Atom = 0x3fc07 + As Atom = 0x3c02 + Aside Atom = 0x10705 + Async Atom = 0xff05 + Audio Atom = 0x11505 + Autocomplete Atom = 0x2780c + Autofocus Atom = 0x12109 + Autoplay Atom = 0x13c08 + B Atom = 0x101 + Base Atom = 0x3b04 + Basefont Atom = 0x3b08 + Bdi Atom = 0xba03 + Bdo Atom = 0x14b03 + Bgsound Atom = 0x15e07 + Big Atom = 0x17003 + Blink Atom = 0x17305 + Blockquote Atom = 0x1870a + Body Atom = 0x2804 + Br Atom = 0x202 + Button Atom = 0x19106 + Canvas Atom = 0x10306 + Caption Atom = 0x23107 + Center Atom = 0x22006 + Challenge Atom = 0x29b09 + Charset Atom = 0x2107 + Checked Atom = 0x47907 + Cite Atom = 0x19c04 + Class Atom = 0x56405 + Code Atom = 0x5c504 + Col Atom = 0x1ab03 + Colgroup Atom = 0x1ab08 + Color Atom = 0x1bf05 + Cols Atom = 0x1c404 + Colspan Atom = 0x1c407 + Command Atom = 0x1d707 + Content Atom = 0x58b07 + Contenteditable Atom = 0x58b0f + Contextmenu Atom = 0x3800b + Controls Atom = 0x1de08 + Coords Atom = 0x1ea06 + Crossorigin Atom = 0x1fb0b + Data Atom = 0x4a504 + Datalist Atom = 0x4a508 + Datetime Atom = 0x2b808 + Dd Atom = 0x2d702 + Default Atom = 0x10a07 + Defer Atom = 0x5c705 + Del Atom = 0x45203 + Desc Atom = 0x56104 + Details Atom = 0x7207 + Dfn Atom = 0x8703 + Dialog Atom = 0xbb06 + Dir Atom = 0x9303 + Dirname Atom = 0x9307 + Disabled Atom = 0x16408 + Div Atom = 0x16b03 + Dl Atom = 0x5e602 + Download Atom = 0x46308 + Draggable Atom = 0x17a09 + Dropzone Atom = 0x40508 + Dt Atom = 0x64b02 + Em Atom = 0x6e02 + Embed Atom = 0x6e05 + Enctype Atom = 0x28d07 + Face Atom = 0x21e04 + Fieldset Atom = 0x22608 + Figcaption Atom = 0x22e0a + Figure Atom = 0x24806 + Font Atom = 0x3f04 + Footer Atom = 0xf606 + For Atom = 0x25403 + ForeignObject Atom = 0x2540d + Foreignobject Atom = 0x2610d + Form Atom = 0x26e04 + Formaction Atom = 0x26e0a + Formenctype Atom = 0x2890b + Formmethod Atom = 0x2a40a + Formnovalidate Atom = 0x2ae0e + Formtarget Atom = 0x2c00a + Frame Atom = 0x8b05 + Frameset Atom = 0x8b08 + H1 Atom = 0x15c02 + H2 Atom = 0x2de02 + H3 Atom = 0x30d02 + H4 Atom = 0x34502 + H5 Atom = 0x34f02 + H6 Atom = 0x64d02 + Head Atom = 0x33104 + Header Atom = 0x33106 + Headers Atom = 0x33107 + Height Atom = 0x5206 + Hgroup Atom = 0x2ca06 + Hidden Atom = 0x2d506 + High Atom = 0x2db04 + Hr Atom = 0x15702 + Href Atom = 0x2e004 + Hreflang Atom = 0x2e008 + Html Atom = 0x5604 + HttpEquiv Atom = 0x2e80a + I Atom = 0x601 + Icon Atom = 0x58a04 + Id Atom = 0x10902 + Iframe Atom = 0x2fc06 + Image Atom = 0x30205 + Img Atom = 0x30703 + Input Atom = 0x44b05 + Inputmode Atom = 0x44b09 + Ins Atom = 0x20403 + Integrity Atom = 0x23f09 + Is Atom = 0x16502 + Isindex Atom = 0x30f07 + Ismap Atom = 0x31605 + Itemid Atom = 0x38b06 + Itemprop Atom = 0x19d08 + Itemref Atom = 0x3cd07 + Itemscope Atom = 0x67109 + Itemtype Atom = 0x31f08 + Kbd Atom = 0xb903 + Keygen Atom = 0x3206 + Keytype Atom = 0xd607 + Kind Atom = 0x17704 + Label Atom = 0x5905 + Lang Atom = 0x2e404 + Legend Atom = 0x18106 + Li Atom = 0xb202 + Link Atom = 0x17404 + List Atom = 0x4a904 + Listing Atom = 0x4a907 + Loop Atom = 0x5d04 + Low Atom = 0xc303 + 
Main Atom = 0x1004 + Malignmark Atom = 0xb00a + Manifest Atom = 0x6d708 + Map Atom = 0x31803 + Mark Atom = 0xb604 + Marquee Atom = 0x32707 + Math Atom = 0x32e04 + Max Atom = 0x33d03 + Maxlength Atom = 0x33d09 + Media Atom = 0xe605 + Mediagroup Atom = 0xe60a + Menu Atom = 0x38704 + Menuitem Atom = 0x38708 + Meta Atom = 0x4b804 + Meter Atom = 0x9805 + Method Atom = 0x2a806 + Mglyph Atom = 0x30806 + Mi Atom = 0x34702 + Min Atom = 0x34703 + Minlength Atom = 0x34709 + Mn Atom = 0x2b102 + Mo Atom = 0xa402 + Ms Atom = 0x67402 + Mtext Atom = 0x35105 + Multiple Atom = 0x35f08 + Muted Atom = 0x36705 + Name Atom = 0x9604 + Nav Atom = 0x1303 + Nobr Atom = 0x3704 + Noembed Atom = 0x6c07 + Noframes Atom = 0x8908 + Nomodule Atom = 0xa208 + Nonce Atom = 0x1a605 + Noscript Atom = 0x21608 + Novalidate Atom = 0x2b20a + Object Atom = 0x26806 + Ol Atom = 0x13702 + Onabort Atom = 0x19507 + Onafterprint Atom = 0x2360c + Onautocomplete Atom = 0x2760e + Onautocompleteerror Atom = 0x27613 + Onauxclick Atom = 0x61f0a + Onbeforeprint Atom = 0x69e0d + Onbeforeunload Atom = 0x6e70e + Onblur Atom = 0x56d06 + Oncancel Atom = 0x11908 + Oncanplay Atom = 0x14d09 + Oncanplaythrough Atom = 0x14d10 + Onchange Atom = 0x41b08 + Onclick Atom = 0x2f507 + Onclose Atom = 0x36c07 + Oncontextmenu Atom = 0x37e0d + Oncopy Atom = 0x39106 + Oncuechange Atom = 0x3970b + Oncut Atom = 0x3a205 + Ondblclick Atom = 0x3a70a + Ondrag Atom = 0x3b106 + Ondragend Atom = 0x3b109 + Ondragenter Atom = 0x3ba0b + Ondragexit Atom = 0x3c50a + Ondragleave Atom = 0x3df0b + Ondragover Atom = 0x3ea0a + Ondragstart Atom = 0x3f40b + Ondrop Atom = 0x40306 + Ondurationchange Atom = 0x41310 + Onemptied Atom = 0x40a09 + Onended Atom = 0x42307 + Onerror Atom = 0x42a07 + Onfocus Atom = 0x43107 + Onhashchange Atom = 0x43d0c + Oninput Atom = 0x44907 + Oninvalid Atom = 0x45509 + Onkeydown Atom = 0x45e09 + Onkeypress Atom = 0x46b0a + Onkeyup Atom = 0x48007 + Onlanguagechange Atom = 0x48d10 + Onload Atom = 0x49d06 + Onloadeddata Atom = 0x49d0c + Onloadedmetadata Atom = 0x4b010 + Onloadend Atom = 0x4c609 + Onloadstart Atom = 0x4cf0b + Onmessage Atom = 0x4da09 + Onmessageerror Atom = 0x4da0e + Onmousedown Atom = 0x4e80b + Onmouseenter Atom = 0x4f30c + Onmouseleave Atom = 0x4ff0c + Onmousemove Atom = 0x50b0b + Onmouseout Atom = 0x5160a + Onmouseover Atom = 0x5230b + Onmouseup Atom = 0x52e09 + Onmousewheel Atom = 0x53c0c + Onoffline Atom = 0x54809 + Ononline Atom = 0x55108 + Onpagehide Atom = 0x5590a + Onpageshow Atom = 0x5730a + Onpaste Atom = 0x57f07 + Onpause Atom = 0x59a07 + Onplay Atom = 0x5a406 + Onplaying Atom = 0x5a409 + Onpopstate Atom = 0x5ad0a + Onprogress Atom = 0x5b70a + Onratechange Atom = 0x5cc0c + Onrejectionhandled Atom = 0x5d812 + Onreset Atom = 0x5ea07 + Onresize Atom = 0x5f108 + Onscroll Atom = 0x60008 + Onsecuritypolicyviolation Atom = 0x60819 + Onseeked Atom = 0x62908 + Onseeking Atom = 0x63109 + Onselect Atom = 0x63a08 + Onshow Atom = 0x64406 + Onsort Atom = 0x64f06 + Onstalled Atom = 0x65909 + Onstorage Atom = 0x66209 + Onsubmit Atom = 0x66b08 + Onsuspend Atom = 0x67b09 + Ontimeupdate Atom = 0x400c + Ontoggle Atom = 0x68408 + Onunhandledrejection Atom = 0x68c14 + Onunload Atom = 0x6ab08 + Onvolumechange Atom = 0x6b30e + Onwaiting Atom = 0x6c109 + Onwheel Atom = 0x6ca07 + Open Atom = 0x1a304 + Optgroup Atom = 0x5f08 + Optimum Atom = 0x6d107 + Option Atom = 0x6e306 + Output Atom = 0x51d06 + P Atom = 0xc01 + Param Atom = 0xc05 + Pattern Atom = 0x6607 + Picture Atom = 0x7b07 + Ping Atom = 0xef04 + Placeholder Atom = 0x1310b + Plaintext Atom = 0x1b209 + 
Playsinline Atom = 0x1400b + Poster Atom = 0x2cf06 + Pre Atom = 0x47003 + Preload Atom = 0x48607 + Progress Atom = 0x5b908 + Prompt Atom = 0x53606 + Public Atom = 0x58606 + Q Atom = 0xcf01 + Radiogroup Atom = 0x30a + Rb Atom = 0x3a02 + Readonly Atom = 0x35708 + Referrerpolicy Atom = 0x3d10e + Rel Atom = 0x48703 + Required Atom = 0x24c08 + Reversed Atom = 0x8008 + Rows Atom = 0x9c04 + Rowspan Atom = 0x9c07 + Rp Atom = 0x23c02 + Rt Atom = 0x19a02 + Rtc Atom = 0x19a03 + Ruby Atom = 0xfb04 + S Atom = 0x2501 + Samp Atom = 0x7804 + Sandbox Atom = 0x12907 + Scope Atom = 0x67505 + Scoped Atom = 0x67506 + Script Atom = 0x21806 + Seamless Atom = 0x37108 + Section Atom = 0x56807 + Select Atom = 0x63c06 + Selected Atom = 0x63c08 + Shape Atom = 0x1e505 + Size Atom = 0x5f504 + Sizes Atom = 0x5f505 + Slot Atom = 0x1ef04 + Small Atom = 0x20605 + Sortable Atom = 0x65108 + Sorted Atom = 0x33706 + Source Atom = 0x37806 + Spacer Atom = 0x43706 + Span Atom = 0x9f04 + Spellcheck Atom = 0x4740a + Src Atom = 0x5c003 + Srcdoc Atom = 0x5c006 + Srclang Atom = 0x5f907 + Srcset Atom = 0x6f906 + Start Atom = 0x3fa05 + Step Atom = 0x58304 + Strike Atom = 0xd206 + Strong Atom = 0x6dd06 + Style Atom = 0x6ff05 + Sub Atom = 0x66d03 + Summary Atom = 0x70407 + Sup Atom = 0x70b03 + Svg Atom = 0x70e03 + System Atom = 0x71106 + Tabindex Atom = 0x4be08 + Table Atom = 0x59505 + Target Atom = 0x2c406 + Tbody Atom = 0x2705 + Td Atom = 0x9202 + Template Atom = 0x71408 + Textarea Atom = 0x35208 + Tfoot Atom = 0xf505 + Th Atom = 0x15602 + Thead Atom = 0x33005 + Time Atom = 0x4204 + Title Atom = 0x11005 + Tr Atom = 0xcc02 + Track Atom = 0x1ba05 + Translate Atom = 0x1f209 + Tt Atom = 0x6802 + Type Atom = 0xd904 + Typemustmatch Atom = 0x2900d + U Atom = 0xb01 + Ul Atom = 0xa702 + Updateviacache Atom = 0x460e + Usemap Atom = 0x59e06 + Value Atom = 0x1505 + Var Atom = 0x16d03 + Video Atom = 0x2f105 + Wbr Atom = 0x57c03 + Width Atom = 0x64905 + Workertype Atom = 0x71c0a + Wrap Atom = 0x72604 + Xmp Atom = 0x12f03 +) + +const hash0 = 0x81cdf10e + +const maxAtomLen = 25 + +var table = [1 << 9]Atom{ + 0x1: 0xe60a, // mediagroup + 0x2: 0x2e404, // lang + 0x4: 0x2c09, // accesskey + 0x5: 0x8b08, // frameset + 0x7: 0x63a08, // onselect + 0x8: 0x71106, // system + 0xa: 0x64905, // width + 0xc: 0x2890b, // formenctype + 0xd: 0x13702, // ol + 0xe: 0x3970b, // oncuechange + 0x10: 0x14b03, // bdo + 0x11: 0x11505, // audio + 0x12: 0x17a09, // draggable + 0x14: 0x2f105, // video + 0x15: 0x2b102, // mn + 0x16: 0x38704, // menu + 0x17: 0x2cf06, // poster + 0x19: 0xf606, // footer + 0x1a: 0x2a806, // method + 0x1b: 0x2b808, // datetime + 0x1c: 0x19507, // onabort + 0x1d: 0x460e, // updateviacache + 0x1e: 0xff05, // async + 0x1f: 0x49d06, // onload + 0x21: 0x11908, // oncancel + 0x22: 0x62908, // onseeked + 0x23: 0x30205, // image + 0x24: 0x5d812, // onrejectionhandled + 0x26: 0x17404, // link + 0x27: 0x51d06, // output + 0x28: 0x33104, // head + 0x29: 0x4ff0c, // onmouseleave + 0x2a: 0x57f07, // onpaste + 0x2b: 0x5a409, // onplaying + 0x2c: 0x1c407, // colspan + 0x2f: 0x1bf05, // color + 0x30: 0x5f504, // size + 0x31: 0x2e80a, // http-equiv + 0x33: 0x601, // i + 0x34: 0x5590a, // onpagehide + 0x35: 0x68c14, // onunhandledrejection + 0x37: 0x42a07, // onerror + 0x3a: 0x3b08, // basefont + 0x3f: 0x1303, // nav + 0x40: 0x17704, // kind + 0x41: 0x35708, // readonly + 0x42: 0x30806, // mglyph + 0x44: 0xb202, // li + 0x46: 0x2d506, // hidden + 0x47: 0x70e03, // svg + 0x48: 0x58304, // step + 0x49: 0x23f09, // integrity + 0x4a: 0x58606, // public + 0x4c: 0x1ab03, 
// col + 0x4d: 0x1870a, // blockquote + 0x4e: 0x34f02, // h5 + 0x50: 0x5b908, // progress + 0x51: 0x5f505, // sizes + 0x52: 0x34502, // h4 + 0x56: 0x33005, // thead + 0x57: 0xd607, // keytype + 0x58: 0x5b70a, // onprogress + 0x59: 0x44b09, // inputmode + 0x5a: 0x3b109, // ondragend + 0x5d: 0x3a205, // oncut + 0x5e: 0x43706, // spacer + 0x5f: 0x1ab08, // colgroup + 0x62: 0x16502, // is + 0x65: 0x3c02, // as + 0x66: 0x54809, // onoffline + 0x67: 0x33706, // sorted + 0x69: 0x48d10, // onlanguagechange + 0x6c: 0x43d0c, // onhashchange + 0x6d: 0x9604, // name + 0x6e: 0xf505, // tfoot + 0x6f: 0x56104, // desc + 0x70: 0x33d03, // max + 0x72: 0x1ea06, // coords + 0x73: 0x30d02, // h3 + 0x74: 0x6e70e, // onbeforeunload + 0x75: 0x9c04, // rows + 0x76: 0x63c06, // select + 0x77: 0x9805, // meter + 0x78: 0x38b06, // itemid + 0x79: 0x53c0c, // onmousewheel + 0x7a: 0x5c006, // srcdoc + 0x7d: 0x1ba05, // track + 0x7f: 0x31f08, // itemtype + 0x82: 0xa402, // mo + 0x83: 0x41b08, // onchange + 0x84: 0x33107, // headers + 0x85: 0x5cc0c, // onratechange + 0x86: 0x60819, // onsecuritypolicyviolation + 0x88: 0x4a508, // datalist + 0x89: 0x4e80b, // onmousedown + 0x8a: 0x1ef04, // slot + 0x8b: 0x4b010, // onloadedmetadata + 0x8c: 0x1a06, // accept + 0x8d: 0x26806, // object + 0x91: 0x6b30e, // onvolumechange + 0x92: 0x2107, // charset + 0x93: 0x27613, // onautocompleteerror + 0x94: 0xc113, // allowpaymentrequest + 0x95: 0x2804, // body + 0x96: 0x10a07, // default + 0x97: 0x63c08, // selected + 0x98: 0x21e04, // face + 0x99: 0x1e505, // shape + 0x9b: 0x68408, // ontoggle + 0x9e: 0x64b02, // dt + 0x9f: 0xb604, // mark + 0xa1: 0xb01, // u + 0xa4: 0x6ab08, // onunload + 0xa5: 0x5d04, // loop + 0xa6: 0x16408, // disabled + 0xaa: 0x42307, // onended + 0xab: 0xb00a, // malignmark + 0xad: 0x67b09, // onsuspend + 0xae: 0x35105, // mtext + 0xaf: 0x64f06, // onsort + 0xb0: 0x19d08, // itemprop + 0xb3: 0x67109, // itemscope + 0xb4: 0x17305, // blink + 0xb6: 0x3b106, // ondrag + 0xb7: 0xa702, // ul + 0xb8: 0x26e04, // form + 0xb9: 0x12907, // sandbox + 0xba: 0x8b05, // frame + 0xbb: 0x1505, // value + 0xbc: 0x66209, // onstorage + 0xbf: 0xaa07, // acronym + 0xc0: 0x19a02, // rt + 0xc2: 0x202, // br + 0xc3: 0x22608, // fieldset + 0xc4: 0x2900d, // typemustmatch + 0xc5: 0xa208, // nomodule + 0xc6: 0x6c07, // noembed + 0xc7: 0x69e0d, // onbeforeprint + 0xc8: 0x19106, // button + 0xc9: 0x2f507, // onclick + 0xca: 0x70407, // summary + 0xcd: 0xfb04, // ruby + 0xce: 0x56405, // class + 0xcf: 0x3f40b, // ondragstart + 0xd0: 0x23107, // caption + 0xd4: 0xdd0e, // allowusermedia + 0xd5: 0x4cf0b, // onloadstart + 0xd9: 0x16b03, // div + 0xda: 0x4a904, // list + 0xdb: 0x32e04, // math + 0xdc: 0x44b05, // input + 0xdf: 0x3ea0a, // ondragover + 0xe0: 0x2de02, // h2 + 0xe2: 0x1b209, // plaintext + 0xe4: 0x4f30c, // onmouseenter + 0xe7: 0x47907, // checked + 0xe8: 0x47003, // pre + 0xea: 0x35f08, // multiple + 0xeb: 0xba03, // bdi + 0xec: 0x33d09, // maxlength + 0xed: 0xcf01, // q + 0xee: 0x61f0a, // onauxclick + 0xf0: 0x57c03, // wbr + 0xf2: 0x3b04, // base + 0xf3: 0x6e306, // option + 0xf5: 0x41310, // ondurationchange + 0xf7: 0x8908, // noframes + 0xf9: 0x40508, // dropzone + 0xfb: 0x67505, // scope + 0xfc: 0x8008, // reversed + 0xfd: 0x3ba0b, // ondragenter + 0xfe: 0x3fa05, // start + 0xff: 0x12f03, // xmp + 0x100: 0x5f907, // srclang + 0x101: 0x30703, // img + 0x104: 0x101, // b + 0x105: 0x25403, // for + 0x106: 0x10705, // aside + 0x107: 0x44907, // oninput + 0x108: 0x35604, // area + 0x109: 0x2a40a, // formmethod + 0x10a: 0x72604, 
// wrap + 0x10c: 0x23c02, // rp + 0x10d: 0x46b0a, // onkeypress + 0x10e: 0x6802, // tt + 0x110: 0x34702, // mi + 0x111: 0x36705, // muted + 0x112: 0xf303, // alt + 0x113: 0x5c504, // code + 0x114: 0x6e02, // em + 0x115: 0x3c50a, // ondragexit + 0x117: 0x9f04, // span + 0x119: 0x6d708, // manifest + 0x11a: 0x38708, // menuitem + 0x11b: 0x58b07, // content + 0x11d: 0x6c109, // onwaiting + 0x11f: 0x4c609, // onloadend + 0x121: 0x37e0d, // oncontextmenu + 0x123: 0x56d06, // onblur + 0x124: 0x3fc07, // article + 0x125: 0x9303, // dir + 0x126: 0xef04, // ping + 0x127: 0x24c08, // required + 0x128: 0x45509, // oninvalid + 0x129: 0xb105, // align + 0x12b: 0x58a04, // icon + 0x12c: 0x64d02, // h6 + 0x12d: 0x1c404, // cols + 0x12e: 0x22e0a, // figcaption + 0x12f: 0x45e09, // onkeydown + 0x130: 0x66b08, // onsubmit + 0x131: 0x14d09, // oncanplay + 0x132: 0x70b03, // sup + 0x133: 0xc01, // p + 0x135: 0x40a09, // onemptied + 0x136: 0x39106, // oncopy + 0x137: 0x19c04, // cite + 0x138: 0x3a70a, // ondblclick + 0x13a: 0x50b0b, // onmousemove + 0x13c: 0x66d03, // sub + 0x13d: 0x48703, // rel + 0x13e: 0x5f08, // optgroup + 0x142: 0x9c07, // rowspan + 0x143: 0x37806, // source + 0x144: 0x21608, // noscript + 0x145: 0x1a304, // open + 0x146: 0x20403, // ins + 0x147: 0x2540d, // foreignObject + 0x148: 0x5ad0a, // onpopstate + 0x14a: 0x28d07, // enctype + 0x14b: 0x2760e, // onautocomplete + 0x14c: 0x35208, // textarea + 0x14e: 0x2780c, // autocomplete + 0x14f: 0x15702, // hr + 0x150: 0x1de08, // controls + 0x151: 0x10902, // id + 0x153: 0x2360c, // onafterprint + 0x155: 0x2610d, // foreignobject + 0x156: 0x32707, // marquee + 0x157: 0x59a07, // onpause + 0x158: 0x5e602, // dl + 0x159: 0x5206, // height + 0x15a: 0x34703, // min + 0x15b: 0x9307, // dirname + 0x15c: 0x1f209, // translate + 0x15d: 0x5604, // html + 0x15e: 0x34709, // minlength + 0x15f: 0x48607, // preload + 0x160: 0x71408, // template + 0x161: 0x3df0b, // ondragleave + 0x162: 0x3a02, // rb + 0x164: 0x5c003, // src + 0x165: 0x6dd06, // strong + 0x167: 0x7804, // samp + 0x168: 0x6f307, // address + 0x169: 0x55108, // ononline + 0x16b: 0x1310b, // placeholder + 0x16c: 0x2c406, // target + 0x16d: 0x20605, // small + 0x16e: 0x6ca07, // onwheel + 0x16f: 0x1c90a, // annotation + 0x170: 0x4740a, // spellcheck + 0x171: 0x7207, // details + 0x172: 0x10306, // canvas + 0x173: 0x12109, // autofocus + 0x174: 0xc05, // param + 0x176: 0x46308, // download + 0x177: 0x45203, // del + 0x178: 0x36c07, // onclose + 0x179: 0xb903, // kbd + 0x17a: 0x31906, // applet + 0x17b: 0x2e004, // href + 0x17c: 0x5f108, // onresize + 0x17e: 0x49d0c, // onloadeddata + 0x180: 0xcc02, // tr + 0x181: 0x2c00a, // formtarget + 0x182: 0x11005, // title + 0x183: 0x6ff05, // style + 0x184: 0xd206, // strike + 0x185: 0x59e06, // usemap + 0x186: 0x2fc06, // iframe + 0x187: 0x1004, // main + 0x189: 0x7b07, // picture + 0x18c: 0x31605, // ismap + 0x18e: 0x4a504, // data + 0x18f: 0x5905, // label + 0x191: 0x3d10e, // referrerpolicy + 0x192: 0x15602, // th + 0x194: 0x53606, // prompt + 0x195: 0x56807, // section + 0x197: 0x6d107, // optimum + 0x198: 0x2db04, // high + 0x199: 0x15c02, // h1 + 0x19a: 0x65909, // onstalled + 0x19b: 0x16d03, // var + 0x19c: 0x4204, // time + 0x19e: 0x67402, // ms + 0x19f: 0x33106, // header + 0x1a0: 0x4da09, // onmessage + 0x1a1: 0x1a605, // nonce + 0x1a2: 0x26e0a, // formaction + 0x1a3: 0x22006, // center + 0x1a4: 0x3704, // nobr + 0x1a5: 0x59505, // table + 0x1a6: 0x4a907, // listing + 0x1a7: 0x18106, // legend + 0x1a9: 0x29b09, // challenge + 0x1aa: 0x24806, // 
figure + 0x1ab: 0xe605, // media + 0x1ae: 0xd904, // type + 0x1af: 0x3f04, // font + 0x1b0: 0x4da0e, // onmessageerror + 0x1b1: 0x37108, // seamless + 0x1b2: 0x8703, // dfn + 0x1b3: 0x5c705, // defer + 0x1b4: 0xc303, // low + 0x1b5: 0x19a03, // rtc + 0x1b6: 0x5230b, // onmouseover + 0x1b7: 0x2b20a, // novalidate + 0x1b8: 0x71c0a, // workertype + 0x1ba: 0x3cd07, // itemref + 0x1bd: 0x1, // a + 0x1be: 0x31803, // map + 0x1bf: 0x400c, // ontimeupdate + 0x1c0: 0x15e07, // bgsound + 0x1c1: 0x3206, // keygen + 0x1c2: 0x2705, // tbody + 0x1c5: 0x64406, // onshow + 0x1c7: 0x2501, // s + 0x1c8: 0x6607, // pattern + 0x1cc: 0x14d10, // oncanplaythrough + 0x1ce: 0x2d702, // dd + 0x1cf: 0x6f906, // srcset + 0x1d0: 0x17003, // big + 0x1d2: 0x65108, // sortable + 0x1d3: 0x48007, // onkeyup + 0x1d5: 0x5a406, // onplay + 0x1d7: 0x4b804, // meta + 0x1d8: 0x40306, // ondrop + 0x1da: 0x60008, // onscroll + 0x1db: 0x1fb0b, // crossorigin + 0x1dc: 0x5730a, // onpageshow + 0x1dd: 0x4, // abbr + 0x1de: 0x9202, // td + 0x1df: 0x58b0f, // contenteditable + 0x1e0: 0x27206, // action + 0x1e1: 0x1400b, // playsinline + 0x1e2: 0x43107, // onfocus + 0x1e3: 0x2e008, // hreflang + 0x1e5: 0x5160a, // onmouseout + 0x1e6: 0x5ea07, // onreset + 0x1e7: 0x13c08, // autoplay + 0x1e8: 0x63109, // onseeking + 0x1ea: 0x67506, // scoped + 0x1ec: 0x30a, // radiogroup + 0x1ee: 0x3800b, // contextmenu + 0x1ef: 0x52e09, // onmouseup + 0x1f1: 0x2ca06, // hgroup + 0x1f2: 0x2080f, // allowfullscreen + 0x1f3: 0x4be08, // tabindex + 0x1f6: 0x30f07, // isindex + 0x1f7: 0x1a0e, // accept-charset + 0x1f8: 0x2ae0e, // formnovalidate + 0x1fb: 0x1c90e, // annotation-xml + 0x1fc: 0x6e05, // embed + 0x1fd: 0x21806, // script + 0x1fe: 0xbb06, // dialog + 0x1ff: 0x1d707, // command +} + +const atomText = "abbradiogrouparamainavalueaccept-charsetbodyaccesskeygenobrb" + + "asefontimeupdateviacacheightmlabelooptgroupatternoembedetail" + + "sampictureversedfnoframesetdirnameterowspanomoduleacronymali" + + "gnmarkbdialogallowpaymentrequestrikeytypeallowusermediagroup" + + "ingaltfooterubyasyncanvasidefaultitleaudioncancelautofocusan" + + "dboxmplaceholderautoplaysinlinebdoncanplaythrough1bgsoundisa" + + "bledivarbigblinkindraggablegendblockquotebuttonabortcitempro" + + "penoncecolgrouplaintextrackcolorcolspannotation-xmlcommandco" + + "ntrolshapecoordslotranslatecrossoriginsmallowfullscreenoscri" + + "ptfacenterfieldsetfigcaptionafterprintegrityfigurequiredfore" + + "ignObjectforeignobjectformactionautocompleteerrorformenctype" + + "mustmatchallengeformmethodformnovalidatetimeformtargethgroup" + + "osterhiddenhigh2hreflanghttp-equivideonclickiframeimageimgly" + + "ph3isindexismappletitemtypemarqueematheadersortedmaxlength4m" + + "inlength5mtextareadonlymultiplemutedoncloseamlessourceoncont" + + "extmenuitemidoncopyoncuechangeoncutondblclickondragendondrag" + + "enterondragexitemreferrerpolicyondragleaveondragoverondragst" + + "articleondropzonemptiedondurationchangeonendedonerroronfocus" + + "paceronhashchangeoninputmodeloninvalidonkeydownloadonkeypres" + + "spellcheckedonkeyupreloadonlanguagechangeonloadeddatalisting" + + "onloadedmetadatabindexonloadendonloadstartonmessageerroronmo" + + "usedownonmouseenteronmouseleaveonmousemoveonmouseoutputonmou" + + "seoveronmouseupromptonmousewheelonofflineononlineonpagehides" + + "classectionbluronpageshowbronpastepublicontenteditableonpaus" + + "emaponplayingonpopstateonprogressrcdocodeferonratechangeonre" + + "jectionhandledonresetonresizesrclangonscrollonsecuritypolicy" + + 
"violationauxclickonseekedonseekingonselectedonshowidth6onsor" + + "tableonstalledonstorageonsubmitemscopedonsuspendontoggleonun" + + "handledrejectionbeforeprintonunloadonvolumechangeonwaitingon" + + "wheeloptimumanifestrongoptionbeforeunloaddressrcsetstylesumm" + + "arysupsvgsystemplateworkertypewrap" diff --git a/vendor/golang.org/x/net/html/const.go b/vendor/golang.org/x/net/html/const.go new file mode 100644 index 0000000000..ff7acf2d5b --- /dev/null +++ b/vendor/golang.org/x/net/html/const.go @@ -0,0 +1,111 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +// Section 12.2.4.2 of the HTML5 specification says "The following elements +// have varying levels of special parsing rules". +// https://html.spec.whatwg.org/multipage/syntax.html#the-stack-of-open-elements +var isSpecialElementMap = map[string]bool{ + "address": true, + "applet": true, + "area": true, + "article": true, + "aside": true, + "base": true, + "basefont": true, + "bgsound": true, + "blockquote": true, + "body": true, + "br": true, + "button": true, + "caption": true, + "center": true, + "col": true, + "colgroup": true, + "dd": true, + "details": true, + "dir": true, + "div": true, + "dl": true, + "dt": true, + "embed": true, + "fieldset": true, + "figcaption": true, + "figure": true, + "footer": true, + "form": true, + "frame": true, + "frameset": true, + "h1": true, + "h2": true, + "h3": true, + "h4": true, + "h5": true, + "h6": true, + "head": true, + "header": true, + "hgroup": true, + "hr": true, + "html": true, + "iframe": true, + "img": true, + "input": true, + "keygen": true, // "keygen" has been removed from the spec, but are kept here for backwards compatibility. + "li": true, + "link": true, + "listing": true, + "main": true, + "marquee": true, + "menu": true, + "meta": true, + "nav": true, + "noembed": true, + "noframes": true, + "noscript": true, + "object": true, + "ol": true, + "p": true, + "param": true, + "plaintext": true, + "pre": true, + "script": true, + "section": true, + "select": true, + "source": true, + "style": true, + "summary": true, + "table": true, + "tbody": true, + "td": true, + "template": true, + "textarea": true, + "tfoot": true, + "th": true, + "thead": true, + "title": true, + "tr": true, + "track": true, + "ul": true, + "wbr": true, + "xmp": true, +} + +func isSpecialElement(element *Node) bool { + switch element.Namespace { + case "", "html": + return isSpecialElementMap[element.Data] + case "math": + switch element.Data { + case "mi", "mo", "mn", "ms", "mtext", "annotation-xml": + return true + } + case "svg": + switch element.Data { + case "foreignObject", "desc", "title": + return true + } + } + return false +} diff --git a/vendor/golang.org/x/net/html/doc.go b/vendor/golang.org/x/net/html/doc.go new file mode 100644 index 0000000000..2466ae3d9a --- /dev/null +++ b/vendor/golang.org/x/net/html/doc.go @@ -0,0 +1,127 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package html implements an HTML5-compliant tokenizer and parser. + +Tokenization is done by creating a Tokenizer for an io.Reader r. It is the +caller's responsibility to ensure that r provides UTF-8 encoded HTML. 
+ + z := html.NewTokenizer(r) + +Given a Tokenizer z, the HTML is tokenized by repeatedly calling z.Next(), +which parses the next token and returns its type, or an error: + + for { + tt := z.Next() + if tt == html.ErrorToken { + // ... + return ... + } + // Process the current token. + } + +There are two APIs for retrieving the current token. The high-level API is to +call Token; the low-level API is to call Text or TagName / TagAttr. Both APIs +allow optionally calling Raw after Next but before Token, Text, TagName, or +TagAttr. In EBNF notation, the valid call sequence per token is: + + Next {Raw} [ Token | Text | TagName {TagAttr} ] + +Token returns an independent data structure that completely describes a token. +Entities (such as "<") are unescaped, tag names and attribute keys are +lower-cased, and attributes are collected into a []Attribute. For example: + + for { + if z.Next() == html.ErrorToken { + // Returning io.EOF indicates success. + return z.Err() + } + emitToken(z.Token()) + } + +The low-level API performs fewer allocations and copies, but the contents of +the []byte values returned by Text, TagName and TagAttr may change on the next +call to Next. For example, to extract an HTML page's anchor text: + + depth := 0 + for { + tt := z.Next() + switch tt { + case html.ErrorToken: + return z.Err() + case html.TextToken: + if depth > 0 { + // emitBytes should copy the []byte it receives, + // if it doesn't process it immediately. + emitBytes(z.Text()) + } + case html.StartTagToken, html.EndTagToken: + tn, _ := z.TagName() + if len(tn) == 1 && tn[0] == 'a' { + if tt == html.StartTagToken { + depth++ + } else { + depth-- + } + } + } + } + +Parsing is done by calling Parse with an io.Reader, which returns the root of +the parse tree (the document element) as a *Node. It is the caller's +responsibility to ensure that the Reader provides UTF-8 encoded HTML. For +example, to process each anchor node in depth-first order: + + doc, err := html.Parse(r) + if err != nil { + // ... + } + var f func(*html.Node) + f = func(n *html.Node) { + if n.Type == html.ElementNode && n.Data == "a" { + // Do something with n... + } + for c := n.FirstChild; c != nil; c = c.NextSibling { + f(c) + } + } + f(doc) + +The relevant specifications include: +https://html.spec.whatwg.org/multipage/syntax.html and +https://html.spec.whatwg.org/multipage/syntax.html#tokenization + +# Security Considerations + +Care should be taken when parsing and interpreting HTML, whether full documents +or fragments, within the framework of the HTML specification, especially with +regard to untrusted inputs. + +This package provides both a tokenizer and a parser, which implement the +tokenization, and tokenization and tree construction stages of the WHATWG HTML +parsing specification respectively. While the tokenizer parses and normalizes +individual HTML tokens, only the parser constructs the DOM tree from the +tokenized HTML, as described in the tree construction stage of the +specification, dynamically modifying or extending the docuemnt's DOM tree. + +If your use case requires semantically well-formed HTML documents, as defined by +the WHATWG specification, the parser should be used rather than the tokenizer. + +In security contexts, if trust decisions are being made using the tokenized or +parsed content, the input must be re-serialized (for instance by using Render or +Token.String) in order for those trust decisions to hold, as the process of +tokenization or parsing may alter the content. 
+*/ +package html // import "golang.org/x/net/html" + +// The tokenization algorithm implemented by this package is not a line-by-line +// transliteration of the relatively verbose state-machine in the WHATWG +// specification. A more direct approach is used instead, where the program +// counter implies the state, such as whether it is tokenizing a tag or a text +// node. Specification compliance is verified by checking expected and actual +// outputs over a test suite rather than aiming for algorithmic fidelity. + +// TODO(nigeltao): Does a DOM API belong in this package or a separate one? +// TODO(nigeltao): How does parsing interact with a JavaScript engine? diff --git a/vendor/golang.org/x/net/html/doctype.go b/vendor/golang.org/x/net/html/doctype.go new file mode 100644 index 0000000000..c484e5a94f --- /dev/null +++ b/vendor/golang.org/x/net/html/doctype.go @@ -0,0 +1,156 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "strings" +) + +// parseDoctype parses the data from a DoctypeToken into a name, +// public identifier, and system identifier. It returns a Node whose Type +// is DoctypeNode, whose Data is the name, and which has attributes +// named "system" and "public" for the two identifiers if they were present. +// quirks is whether the document should be parsed in "quirks mode". +func parseDoctype(s string) (n *Node, quirks bool) { + n = &Node{Type: DoctypeNode} + + // Find the name. + space := strings.IndexAny(s, whitespace) + if space == -1 { + space = len(s) + } + n.Data = s[:space] + // The comparison to "html" is case-sensitive. + if n.Data != "html" { + quirks = true + } + n.Data = strings.ToLower(n.Data) + s = strings.TrimLeft(s[space:], whitespace) + + if len(s) < 6 { + // It can't start with "PUBLIC" or "SYSTEM". + // Ignore the rest of the string. + return n, quirks || s != "" + } + + key := strings.ToLower(s[:6]) + s = s[6:] + for key == "public" || key == "system" { + s = strings.TrimLeft(s, whitespace) + if s == "" { + break + } + quote := s[0] + if quote != '"' && quote != '\'' { + break + } + s = s[1:] + q := strings.IndexRune(s, rune(quote)) + var id string + if q == -1 { + id = s + s = "" + } else { + id = s[:q] + s = s[q+1:] + } + n.Attr = append(n.Attr, Attribute{Key: key, Val: id}) + if key == "public" { + key = "system" + } else { + key = "" + } + } + + if key != "" || s != "" { + quirks = true + } else if len(n.Attr) > 0 { + if n.Attr[0].Key == "public" { + public := strings.ToLower(n.Attr[0].Val) + switch public { + case "-//w3o//dtd w3 html strict 3.0//en//", "-/w3d/dtd html 4.0 transitional/en", "html": + quirks = true + default: + for _, q := range quirkyIDs { + if strings.HasPrefix(public, q) { + quirks = true + break + } + } + } + // The following two public IDs only cause quirks mode if there is no system ID. + if len(n.Attr) == 1 && (strings.HasPrefix(public, "-//w3c//dtd html 4.01 frameset//") || + strings.HasPrefix(public, "-//w3c//dtd html 4.01 transitional//")) { + quirks = true + } + } + if lastAttr := n.Attr[len(n.Attr)-1]; lastAttr.Key == "system" && + strings.ToLower(lastAttr.Val) == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd" { + quirks = true + } + } + + return n, quirks +} + +// quirkyIDs is a list of public doctype identifiers that cause a document +// to be interpreted in quirks mode. The identifiers should be in lower case. 
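A small sketch, for illustration, of how the parseDoctype behaviour above surfaces through the public API: the doctype name is lower-cased into Node.Data and the identifiers become "public"/"system" attributes. The example markup is arbitrary:

    package main

    import (
        "fmt"
        "strings"

        "golang.org/x/net/html"
    )

    func main() {
        const page = `<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd"><p>hi`
        doc, err := html.Parse(strings.NewReader(page))
        if err != nil {
            panic(err)
        }
        for c := doc.FirstChild; c != nil; c = c.NextSibling {
            if c.Type == html.DoctypeNode {
                fmt.Println(c.Data, c.Attr) // "html" plus public/system attributes
            }
        }
    }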
+var quirkyIDs = []string{ + "+//silmaril//dtd html pro v0r11 19970101//", + "-//advasoft ltd//dtd html 3.0 aswedit + extensions//", + "-//as//dtd html 3.0 aswedit + extensions//", + "-//ietf//dtd html 2.0 level 1//", + "-//ietf//dtd html 2.0 level 2//", + "-//ietf//dtd html 2.0 strict level 1//", + "-//ietf//dtd html 2.0 strict level 2//", + "-//ietf//dtd html 2.0 strict//", + "-//ietf//dtd html 2.0//", + "-//ietf//dtd html 2.1e//", + "-//ietf//dtd html 3.0//", + "-//ietf//dtd html 3.2 final//", + "-//ietf//dtd html 3.2//", + "-//ietf//dtd html 3//", + "-//ietf//dtd html level 0//", + "-//ietf//dtd html level 1//", + "-//ietf//dtd html level 2//", + "-//ietf//dtd html level 3//", + "-//ietf//dtd html strict level 0//", + "-//ietf//dtd html strict level 1//", + "-//ietf//dtd html strict level 2//", + "-//ietf//dtd html strict level 3//", + "-//ietf//dtd html strict//", + "-//ietf//dtd html//", + "-//metrius//dtd metrius presentational//", + "-//microsoft//dtd internet explorer 2.0 html strict//", + "-//microsoft//dtd internet explorer 2.0 html//", + "-//microsoft//dtd internet explorer 2.0 tables//", + "-//microsoft//dtd internet explorer 3.0 html strict//", + "-//microsoft//dtd internet explorer 3.0 html//", + "-//microsoft//dtd internet explorer 3.0 tables//", + "-//netscape comm. corp.//dtd html//", + "-//netscape comm. corp.//dtd strict html//", + "-//o'reilly and associates//dtd html 2.0//", + "-//o'reilly and associates//dtd html extended 1.0//", + "-//o'reilly and associates//dtd html extended relaxed 1.0//", + "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//", + "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//", + "-//spyglass//dtd html 2.0 extended//", + "-//sq//dtd html 2.0 hotmetal + extensions//", + "-//sun microsystems corp.//dtd hotjava html//", + "-//sun microsystems corp.//dtd hotjava strict html//", + "-//w3c//dtd html 3 1995-03-24//", + "-//w3c//dtd html 3.2 draft//", + "-//w3c//dtd html 3.2 final//", + "-//w3c//dtd html 3.2//", + "-//w3c//dtd html 3.2s draft//", + "-//w3c//dtd html 4.0 frameset//", + "-//w3c//dtd html 4.0 transitional//", + "-//w3c//dtd html experimental 19960712//", + "-//w3c//dtd html experimental 970421//", + "-//w3c//dtd w3 html//", + "-//w3o//dtd w3 html 3.0//", + "-//webtechs//dtd mozilla html 2.0//", + "-//webtechs//dtd mozilla html//", +} diff --git a/vendor/golang.org/x/net/html/entity.go b/vendor/golang.org/x/net/html/entity.go new file mode 100644 index 0000000000..b628880a01 --- /dev/null +++ b/vendor/golang.org/x/net/html/entity.go @@ -0,0 +1,2253 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +// All entities that do not end with ';' are 6 or fewer bytes long. +const longestEntityWithoutSemicolon = 6 + +// entity is a map from HTML entity names to their values. The semicolon matters: +// https://html.spec.whatwg.org/multipage/syntax.html#named-character-references +// lists both "amp" and "amp;" as two separate entries. 
+// +// Note that the HTML5 list is larger than the HTML4 list at +// http://www.w3.org/TR/html4/sgml/entities.html +var entity = map[string]rune{ + "AElig;": '\U000000C6', + "AMP;": '\U00000026', + "Aacute;": '\U000000C1', + "Abreve;": '\U00000102', + "Acirc;": '\U000000C2', + "Acy;": '\U00000410', + "Afr;": '\U0001D504', + "Agrave;": '\U000000C0', + "Alpha;": '\U00000391', + "Amacr;": '\U00000100', + "And;": '\U00002A53', + "Aogon;": '\U00000104', + "Aopf;": '\U0001D538', + "ApplyFunction;": '\U00002061', + "Aring;": '\U000000C5', + "Ascr;": '\U0001D49C', + "Assign;": '\U00002254', + "Atilde;": '\U000000C3', + "Auml;": '\U000000C4', + "Backslash;": '\U00002216', + "Barv;": '\U00002AE7', + "Barwed;": '\U00002306', + "Bcy;": '\U00000411', + "Because;": '\U00002235', + "Bernoullis;": '\U0000212C', + "Beta;": '\U00000392', + "Bfr;": '\U0001D505', + "Bopf;": '\U0001D539', + "Breve;": '\U000002D8', + "Bscr;": '\U0000212C', + "Bumpeq;": '\U0000224E', + "CHcy;": '\U00000427', + "COPY;": '\U000000A9', + "Cacute;": '\U00000106', + "Cap;": '\U000022D2', + "CapitalDifferentialD;": '\U00002145', + "Cayleys;": '\U0000212D', + "Ccaron;": '\U0000010C', + "Ccedil;": '\U000000C7', + "Ccirc;": '\U00000108', + "Cconint;": '\U00002230', + "Cdot;": '\U0000010A', + "Cedilla;": '\U000000B8', + "CenterDot;": '\U000000B7', + "Cfr;": '\U0000212D', + "Chi;": '\U000003A7', + "CircleDot;": '\U00002299', + "CircleMinus;": '\U00002296', + "CirclePlus;": '\U00002295', + "CircleTimes;": '\U00002297', + "ClockwiseContourIntegral;": '\U00002232', + "CloseCurlyDoubleQuote;": '\U0000201D', + "CloseCurlyQuote;": '\U00002019', + "Colon;": '\U00002237', + "Colone;": '\U00002A74', + "Congruent;": '\U00002261', + "Conint;": '\U0000222F', + "ContourIntegral;": '\U0000222E', + "Copf;": '\U00002102', + "Coproduct;": '\U00002210', + "CounterClockwiseContourIntegral;": '\U00002233', + "Cross;": '\U00002A2F', + "Cscr;": '\U0001D49E', + "Cup;": '\U000022D3', + "CupCap;": '\U0000224D', + "DD;": '\U00002145', + "DDotrahd;": '\U00002911', + "DJcy;": '\U00000402', + "DScy;": '\U00000405', + "DZcy;": '\U0000040F', + "Dagger;": '\U00002021', + "Darr;": '\U000021A1', + "Dashv;": '\U00002AE4', + "Dcaron;": '\U0000010E', + "Dcy;": '\U00000414', + "Del;": '\U00002207', + "Delta;": '\U00000394', + "Dfr;": '\U0001D507', + "DiacriticalAcute;": '\U000000B4', + "DiacriticalDot;": '\U000002D9', + "DiacriticalDoubleAcute;": '\U000002DD', + "DiacriticalGrave;": '\U00000060', + "DiacriticalTilde;": '\U000002DC', + "Diamond;": '\U000022C4', + "DifferentialD;": '\U00002146', + "Dopf;": '\U0001D53B', + "Dot;": '\U000000A8', + "DotDot;": '\U000020DC', + "DotEqual;": '\U00002250', + "DoubleContourIntegral;": '\U0000222F', + "DoubleDot;": '\U000000A8', + "DoubleDownArrow;": '\U000021D3', + "DoubleLeftArrow;": '\U000021D0', + "DoubleLeftRightArrow;": '\U000021D4', + "DoubleLeftTee;": '\U00002AE4', + "DoubleLongLeftArrow;": '\U000027F8', + "DoubleLongLeftRightArrow;": '\U000027FA', + "DoubleLongRightArrow;": '\U000027F9', + "DoubleRightArrow;": '\U000021D2', + "DoubleRightTee;": '\U000022A8', + "DoubleUpArrow;": '\U000021D1', + "DoubleUpDownArrow;": '\U000021D5', + "DoubleVerticalBar;": '\U00002225', + "DownArrow;": '\U00002193', + "DownArrowBar;": '\U00002913', + "DownArrowUpArrow;": '\U000021F5', + "DownBreve;": '\U00000311', + "DownLeftRightVector;": '\U00002950', + "DownLeftTeeVector;": '\U0000295E', + "DownLeftVector;": '\U000021BD', + "DownLeftVectorBar;": '\U00002956', + "DownRightTeeVector;": '\U0000295F', + "DownRightVector;": '\U000021C1', + 
"DownRightVectorBar;": '\U00002957', + "DownTee;": '\U000022A4', + "DownTeeArrow;": '\U000021A7', + "Downarrow;": '\U000021D3', + "Dscr;": '\U0001D49F', + "Dstrok;": '\U00000110', + "ENG;": '\U0000014A', + "ETH;": '\U000000D0', + "Eacute;": '\U000000C9', + "Ecaron;": '\U0000011A', + "Ecirc;": '\U000000CA', + "Ecy;": '\U0000042D', + "Edot;": '\U00000116', + "Efr;": '\U0001D508', + "Egrave;": '\U000000C8', + "Element;": '\U00002208', + "Emacr;": '\U00000112', + "EmptySmallSquare;": '\U000025FB', + "EmptyVerySmallSquare;": '\U000025AB', + "Eogon;": '\U00000118', + "Eopf;": '\U0001D53C', + "Epsilon;": '\U00000395', + "Equal;": '\U00002A75', + "EqualTilde;": '\U00002242', + "Equilibrium;": '\U000021CC', + "Escr;": '\U00002130', + "Esim;": '\U00002A73', + "Eta;": '\U00000397', + "Euml;": '\U000000CB', + "Exists;": '\U00002203', + "ExponentialE;": '\U00002147', + "Fcy;": '\U00000424', + "Ffr;": '\U0001D509', + "FilledSmallSquare;": '\U000025FC', + "FilledVerySmallSquare;": '\U000025AA', + "Fopf;": '\U0001D53D', + "ForAll;": '\U00002200', + "Fouriertrf;": '\U00002131', + "Fscr;": '\U00002131', + "GJcy;": '\U00000403', + "GT;": '\U0000003E', + "Gamma;": '\U00000393', + "Gammad;": '\U000003DC', + "Gbreve;": '\U0000011E', + "Gcedil;": '\U00000122', + "Gcirc;": '\U0000011C', + "Gcy;": '\U00000413', + "Gdot;": '\U00000120', + "Gfr;": '\U0001D50A', + "Gg;": '\U000022D9', + "Gopf;": '\U0001D53E', + "GreaterEqual;": '\U00002265', + "GreaterEqualLess;": '\U000022DB', + "GreaterFullEqual;": '\U00002267', + "GreaterGreater;": '\U00002AA2', + "GreaterLess;": '\U00002277', + "GreaterSlantEqual;": '\U00002A7E', + "GreaterTilde;": '\U00002273', + "Gscr;": '\U0001D4A2', + "Gt;": '\U0000226B', + "HARDcy;": '\U0000042A', + "Hacek;": '\U000002C7', + "Hat;": '\U0000005E', + "Hcirc;": '\U00000124', + "Hfr;": '\U0000210C', + "HilbertSpace;": '\U0000210B', + "Hopf;": '\U0000210D', + "HorizontalLine;": '\U00002500', + "Hscr;": '\U0000210B', + "Hstrok;": '\U00000126', + "HumpDownHump;": '\U0000224E', + "HumpEqual;": '\U0000224F', + "IEcy;": '\U00000415', + "IJlig;": '\U00000132', + "IOcy;": '\U00000401', + "Iacute;": '\U000000CD', + "Icirc;": '\U000000CE', + "Icy;": '\U00000418', + "Idot;": '\U00000130', + "Ifr;": '\U00002111', + "Igrave;": '\U000000CC', + "Im;": '\U00002111', + "Imacr;": '\U0000012A', + "ImaginaryI;": '\U00002148', + "Implies;": '\U000021D2', + "Int;": '\U0000222C', + "Integral;": '\U0000222B', + "Intersection;": '\U000022C2', + "InvisibleComma;": '\U00002063', + "InvisibleTimes;": '\U00002062', + "Iogon;": '\U0000012E', + "Iopf;": '\U0001D540', + "Iota;": '\U00000399', + "Iscr;": '\U00002110', + "Itilde;": '\U00000128', + "Iukcy;": '\U00000406', + "Iuml;": '\U000000CF', + "Jcirc;": '\U00000134', + "Jcy;": '\U00000419', + "Jfr;": '\U0001D50D', + "Jopf;": '\U0001D541', + "Jscr;": '\U0001D4A5', + "Jsercy;": '\U00000408', + "Jukcy;": '\U00000404', + "KHcy;": '\U00000425', + "KJcy;": '\U0000040C', + "Kappa;": '\U0000039A', + "Kcedil;": '\U00000136', + "Kcy;": '\U0000041A', + "Kfr;": '\U0001D50E', + "Kopf;": '\U0001D542', + "Kscr;": '\U0001D4A6', + "LJcy;": '\U00000409', + "LT;": '\U0000003C', + "Lacute;": '\U00000139', + "Lambda;": '\U0000039B', + "Lang;": '\U000027EA', + "Laplacetrf;": '\U00002112', + "Larr;": '\U0000219E', + "Lcaron;": '\U0000013D', + "Lcedil;": '\U0000013B', + "Lcy;": '\U0000041B', + "LeftAngleBracket;": '\U000027E8', + "LeftArrow;": '\U00002190', + "LeftArrowBar;": '\U000021E4', + "LeftArrowRightArrow;": '\U000021C6', + "LeftCeiling;": '\U00002308', + "LeftDoubleBracket;": '\U000027E6', 
+ "LeftDownTeeVector;": '\U00002961', + "LeftDownVector;": '\U000021C3', + "LeftDownVectorBar;": '\U00002959', + "LeftFloor;": '\U0000230A', + "LeftRightArrow;": '\U00002194', + "LeftRightVector;": '\U0000294E', + "LeftTee;": '\U000022A3', + "LeftTeeArrow;": '\U000021A4', + "LeftTeeVector;": '\U0000295A', + "LeftTriangle;": '\U000022B2', + "LeftTriangleBar;": '\U000029CF', + "LeftTriangleEqual;": '\U000022B4', + "LeftUpDownVector;": '\U00002951', + "LeftUpTeeVector;": '\U00002960', + "LeftUpVector;": '\U000021BF', + "LeftUpVectorBar;": '\U00002958', + "LeftVector;": '\U000021BC', + "LeftVectorBar;": '\U00002952', + "Leftarrow;": '\U000021D0', + "Leftrightarrow;": '\U000021D4', + "LessEqualGreater;": '\U000022DA', + "LessFullEqual;": '\U00002266', + "LessGreater;": '\U00002276', + "LessLess;": '\U00002AA1', + "LessSlantEqual;": '\U00002A7D', + "LessTilde;": '\U00002272', + "Lfr;": '\U0001D50F', + "Ll;": '\U000022D8', + "Lleftarrow;": '\U000021DA', + "Lmidot;": '\U0000013F', + "LongLeftArrow;": '\U000027F5', + "LongLeftRightArrow;": '\U000027F7', + "LongRightArrow;": '\U000027F6', + "Longleftarrow;": '\U000027F8', + "Longleftrightarrow;": '\U000027FA', + "Longrightarrow;": '\U000027F9', + "Lopf;": '\U0001D543', + "LowerLeftArrow;": '\U00002199', + "LowerRightArrow;": '\U00002198', + "Lscr;": '\U00002112', + "Lsh;": '\U000021B0', + "Lstrok;": '\U00000141', + "Lt;": '\U0000226A', + "Map;": '\U00002905', + "Mcy;": '\U0000041C', + "MediumSpace;": '\U0000205F', + "Mellintrf;": '\U00002133', + "Mfr;": '\U0001D510', + "MinusPlus;": '\U00002213', + "Mopf;": '\U0001D544', + "Mscr;": '\U00002133', + "Mu;": '\U0000039C', + "NJcy;": '\U0000040A', + "Nacute;": '\U00000143', + "Ncaron;": '\U00000147', + "Ncedil;": '\U00000145', + "Ncy;": '\U0000041D', + "NegativeMediumSpace;": '\U0000200B', + "NegativeThickSpace;": '\U0000200B', + "NegativeThinSpace;": '\U0000200B', + "NegativeVeryThinSpace;": '\U0000200B', + "NestedGreaterGreater;": '\U0000226B', + "NestedLessLess;": '\U0000226A', + "NewLine;": '\U0000000A', + "Nfr;": '\U0001D511', + "NoBreak;": '\U00002060', + "NonBreakingSpace;": '\U000000A0', + "Nopf;": '\U00002115', + "Not;": '\U00002AEC', + "NotCongruent;": '\U00002262', + "NotCupCap;": '\U0000226D', + "NotDoubleVerticalBar;": '\U00002226', + "NotElement;": '\U00002209', + "NotEqual;": '\U00002260', + "NotExists;": '\U00002204', + "NotGreater;": '\U0000226F', + "NotGreaterEqual;": '\U00002271', + "NotGreaterLess;": '\U00002279', + "NotGreaterTilde;": '\U00002275', + "NotLeftTriangle;": '\U000022EA', + "NotLeftTriangleEqual;": '\U000022EC', + "NotLess;": '\U0000226E', + "NotLessEqual;": '\U00002270', + "NotLessGreater;": '\U00002278', + "NotLessTilde;": '\U00002274', + "NotPrecedes;": '\U00002280', + "NotPrecedesSlantEqual;": '\U000022E0', + "NotReverseElement;": '\U0000220C', + "NotRightTriangle;": '\U000022EB', + "NotRightTriangleEqual;": '\U000022ED', + "NotSquareSubsetEqual;": '\U000022E2', + "NotSquareSupersetEqual;": '\U000022E3', + "NotSubsetEqual;": '\U00002288', + "NotSucceeds;": '\U00002281', + "NotSucceedsSlantEqual;": '\U000022E1', + "NotSupersetEqual;": '\U00002289', + "NotTilde;": '\U00002241', + "NotTildeEqual;": '\U00002244', + "NotTildeFullEqual;": '\U00002247', + "NotTildeTilde;": '\U00002249', + "NotVerticalBar;": '\U00002224', + "Nscr;": '\U0001D4A9', + "Ntilde;": '\U000000D1', + "Nu;": '\U0000039D', + "OElig;": '\U00000152', + "Oacute;": '\U000000D3', + "Ocirc;": '\U000000D4', + "Ocy;": '\U0000041E', + "Odblac;": '\U00000150', + "Ofr;": '\U0001D512', + "Ograve;": '\U000000D2', + 
"Omacr;": '\U0000014C', + "Omega;": '\U000003A9', + "Omicron;": '\U0000039F', + "Oopf;": '\U0001D546', + "OpenCurlyDoubleQuote;": '\U0000201C', + "OpenCurlyQuote;": '\U00002018', + "Or;": '\U00002A54', + "Oscr;": '\U0001D4AA', + "Oslash;": '\U000000D8', + "Otilde;": '\U000000D5', + "Otimes;": '\U00002A37', + "Ouml;": '\U000000D6', + "OverBar;": '\U0000203E', + "OverBrace;": '\U000023DE', + "OverBracket;": '\U000023B4', + "OverParenthesis;": '\U000023DC', + "PartialD;": '\U00002202', + "Pcy;": '\U0000041F', + "Pfr;": '\U0001D513', + "Phi;": '\U000003A6', + "Pi;": '\U000003A0', + "PlusMinus;": '\U000000B1', + "Poincareplane;": '\U0000210C', + "Popf;": '\U00002119', + "Pr;": '\U00002ABB', + "Precedes;": '\U0000227A', + "PrecedesEqual;": '\U00002AAF', + "PrecedesSlantEqual;": '\U0000227C', + "PrecedesTilde;": '\U0000227E', + "Prime;": '\U00002033', + "Product;": '\U0000220F', + "Proportion;": '\U00002237', + "Proportional;": '\U0000221D', + "Pscr;": '\U0001D4AB', + "Psi;": '\U000003A8', + "QUOT;": '\U00000022', + "Qfr;": '\U0001D514', + "Qopf;": '\U0000211A', + "Qscr;": '\U0001D4AC', + "RBarr;": '\U00002910', + "REG;": '\U000000AE', + "Racute;": '\U00000154', + "Rang;": '\U000027EB', + "Rarr;": '\U000021A0', + "Rarrtl;": '\U00002916', + "Rcaron;": '\U00000158', + "Rcedil;": '\U00000156', + "Rcy;": '\U00000420', + "Re;": '\U0000211C', + "ReverseElement;": '\U0000220B', + "ReverseEquilibrium;": '\U000021CB', + "ReverseUpEquilibrium;": '\U0000296F', + "Rfr;": '\U0000211C', + "Rho;": '\U000003A1', + "RightAngleBracket;": '\U000027E9', + "RightArrow;": '\U00002192', + "RightArrowBar;": '\U000021E5', + "RightArrowLeftArrow;": '\U000021C4', + "RightCeiling;": '\U00002309', + "RightDoubleBracket;": '\U000027E7', + "RightDownTeeVector;": '\U0000295D', + "RightDownVector;": '\U000021C2', + "RightDownVectorBar;": '\U00002955', + "RightFloor;": '\U0000230B', + "RightTee;": '\U000022A2', + "RightTeeArrow;": '\U000021A6', + "RightTeeVector;": '\U0000295B', + "RightTriangle;": '\U000022B3', + "RightTriangleBar;": '\U000029D0', + "RightTriangleEqual;": '\U000022B5', + "RightUpDownVector;": '\U0000294F', + "RightUpTeeVector;": '\U0000295C', + "RightUpVector;": '\U000021BE', + "RightUpVectorBar;": '\U00002954', + "RightVector;": '\U000021C0', + "RightVectorBar;": '\U00002953', + "Rightarrow;": '\U000021D2', + "Ropf;": '\U0000211D', + "RoundImplies;": '\U00002970', + "Rrightarrow;": '\U000021DB', + "Rscr;": '\U0000211B', + "Rsh;": '\U000021B1', + "RuleDelayed;": '\U000029F4', + "SHCHcy;": '\U00000429', + "SHcy;": '\U00000428', + "SOFTcy;": '\U0000042C', + "Sacute;": '\U0000015A', + "Sc;": '\U00002ABC', + "Scaron;": '\U00000160', + "Scedil;": '\U0000015E', + "Scirc;": '\U0000015C', + "Scy;": '\U00000421', + "Sfr;": '\U0001D516', + "ShortDownArrow;": '\U00002193', + "ShortLeftArrow;": '\U00002190', + "ShortRightArrow;": '\U00002192', + "ShortUpArrow;": '\U00002191', + "Sigma;": '\U000003A3', + "SmallCircle;": '\U00002218', + "Sopf;": '\U0001D54A', + "Sqrt;": '\U0000221A', + "Square;": '\U000025A1', + "SquareIntersection;": '\U00002293', + "SquareSubset;": '\U0000228F', + "SquareSubsetEqual;": '\U00002291', + "SquareSuperset;": '\U00002290', + "SquareSupersetEqual;": '\U00002292', + "SquareUnion;": '\U00002294', + "Sscr;": '\U0001D4AE', + "Star;": '\U000022C6', + "Sub;": '\U000022D0', + "Subset;": '\U000022D0', + "SubsetEqual;": '\U00002286', + "Succeeds;": '\U0000227B', + "SucceedsEqual;": '\U00002AB0', + "SucceedsSlantEqual;": '\U0000227D', + "SucceedsTilde;": '\U0000227F', + "SuchThat;": '\U0000220B', + "Sum;": 
'\U00002211', + "Sup;": '\U000022D1', + "Superset;": '\U00002283', + "SupersetEqual;": '\U00002287', + "Supset;": '\U000022D1', + "THORN;": '\U000000DE', + "TRADE;": '\U00002122', + "TSHcy;": '\U0000040B', + "TScy;": '\U00000426', + "Tab;": '\U00000009', + "Tau;": '\U000003A4', + "Tcaron;": '\U00000164', + "Tcedil;": '\U00000162', + "Tcy;": '\U00000422', + "Tfr;": '\U0001D517', + "Therefore;": '\U00002234', + "Theta;": '\U00000398', + "ThinSpace;": '\U00002009', + "Tilde;": '\U0000223C', + "TildeEqual;": '\U00002243', + "TildeFullEqual;": '\U00002245', + "TildeTilde;": '\U00002248', + "Topf;": '\U0001D54B', + "TripleDot;": '\U000020DB', + "Tscr;": '\U0001D4AF', + "Tstrok;": '\U00000166', + "Uacute;": '\U000000DA', + "Uarr;": '\U0000219F', + "Uarrocir;": '\U00002949', + "Ubrcy;": '\U0000040E', + "Ubreve;": '\U0000016C', + "Ucirc;": '\U000000DB', + "Ucy;": '\U00000423', + "Udblac;": '\U00000170', + "Ufr;": '\U0001D518', + "Ugrave;": '\U000000D9', + "Umacr;": '\U0000016A', + "UnderBar;": '\U0000005F', + "UnderBrace;": '\U000023DF', + "UnderBracket;": '\U000023B5', + "UnderParenthesis;": '\U000023DD', + "Union;": '\U000022C3', + "UnionPlus;": '\U0000228E', + "Uogon;": '\U00000172', + "Uopf;": '\U0001D54C', + "UpArrow;": '\U00002191', + "UpArrowBar;": '\U00002912', + "UpArrowDownArrow;": '\U000021C5', + "UpDownArrow;": '\U00002195', + "UpEquilibrium;": '\U0000296E', + "UpTee;": '\U000022A5', + "UpTeeArrow;": '\U000021A5', + "Uparrow;": '\U000021D1', + "Updownarrow;": '\U000021D5', + "UpperLeftArrow;": '\U00002196', + "UpperRightArrow;": '\U00002197', + "Upsi;": '\U000003D2', + "Upsilon;": '\U000003A5', + "Uring;": '\U0000016E', + "Uscr;": '\U0001D4B0', + "Utilde;": '\U00000168', + "Uuml;": '\U000000DC', + "VDash;": '\U000022AB', + "Vbar;": '\U00002AEB', + "Vcy;": '\U00000412', + "Vdash;": '\U000022A9', + "Vdashl;": '\U00002AE6', + "Vee;": '\U000022C1', + "Verbar;": '\U00002016', + "Vert;": '\U00002016', + "VerticalBar;": '\U00002223', + "VerticalLine;": '\U0000007C', + "VerticalSeparator;": '\U00002758', + "VerticalTilde;": '\U00002240', + "VeryThinSpace;": '\U0000200A', + "Vfr;": '\U0001D519', + "Vopf;": '\U0001D54D', + "Vscr;": '\U0001D4B1', + "Vvdash;": '\U000022AA', + "Wcirc;": '\U00000174', + "Wedge;": '\U000022C0', + "Wfr;": '\U0001D51A', + "Wopf;": '\U0001D54E', + "Wscr;": '\U0001D4B2', + "Xfr;": '\U0001D51B', + "Xi;": '\U0000039E', + "Xopf;": '\U0001D54F', + "Xscr;": '\U0001D4B3', + "YAcy;": '\U0000042F', + "YIcy;": '\U00000407', + "YUcy;": '\U0000042E', + "Yacute;": '\U000000DD', + "Ycirc;": '\U00000176', + "Ycy;": '\U0000042B', + "Yfr;": '\U0001D51C', + "Yopf;": '\U0001D550', + "Yscr;": '\U0001D4B4', + "Yuml;": '\U00000178', + "ZHcy;": '\U00000416', + "Zacute;": '\U00000179', + "Zcaron;": '\U0000017D', + "Zcy;": '\U00000417', + "Zdot;": '\U0000017B', + "ZeroWidthSpace;": '\U0000200B', + "Zeta;": '\U00000396', + "Zfr;": '\U00002128', + "Zopf;": '\U00002124', + "Zscr;": '\U0001D4B5', + "aacute;": '\U000000E1', + "abreve;": '\U00000103', + "ac;": '\U0000223E', + "acd;": '\U0000223F', + "acirc;": '\U000000E2', + "acute;": '\U000000B4', + "acy;": '\U00000430', + "aelig;": '\U000000E6', + "af;": '\U00002061', + "afr;": '\U0001D51E', + "agrave;": '\U000000E0', + "alefsym;": '\U00002135', + "aleph;": '\U00002135', + "alpha;": '\U000003B1', + "amacr;": '\U00000101', + "amalg;": '\U00002A3F', + "amp;": '\U00000026', + "and;": '\U00002227', + "andand;": '\U00002A55', + "andd;": '\U00002A5C', + "andslope;": '\U00002A58', + "andv;": '\U00002A5A', + "ang;": '\U00002220', + "ange;": '\U000029A4', + 
"angle;": '\U00002220', + "angmsd;": '\U00002221', + "angmsdaa;": '\U000029A8', + "angmsdab;": '\U000029A9', + "angmsdac;": '\U000029AA', + "angmsdad;": '\U000029AB', + "angmsdae;": '\U000029AC', + "angmsdaf;": '\U000029AD', + "angmsdag;": '\U000029AE', + "angmsdah;": '\U000029AF', + "angrt;": '\U0000221F', + "angrtvb;": '\U000022BE', + "angrtvbd;": '\U0000299D', + "angsph;": '\U00002222', + "angst;": '\U000000C5', + "angzarr;": '\U0000237C', + "aogon;": '\U00000105', + "aopf;": '\U0001D552', + "ap;": '\U00002248', + "apE;": '\U00002A70', + "apacir;": '\U00002A6F', + "ape;": '\U0000224A', + "apid;": '\U0000224B', + "apos;": '\U00000027', + "approx;": '\U00002248', + "approxeq;": '\U0000224A', + "aring;": '\U000000E5', + "ascr;": '\U0001D4B6', + "ast;": '\U0000002A', + "asymp;": '\U00002248', + "asympeq;": '\U0000224D', + "atilde;": '\U000000E3', + "auml;": '\U000000E4', + "awconint;": '\U00002233', + "awint;": '\U00002A11', + "bNot;": '\U00002AED', + "backcong;": '\U0000224C', + "backepsilon;": '\U000003F6', + "backprime;": '\U00002035', + "backsim;": '\U0000223D', + "backsimeq;": '\U000022CD', + "barvee;": '\U000022BD', + "barwed;": '\U00002305', + "barwedge;": '\U00002305', + "bbrk;": '\U000023B5', + "bbrktbrk;": '\U000023B6', + "bcong;": '\U0000224C', + "bcy;": '\U00000431', + "bdquo;": '\U0000201E', + "becaus;": '\U00002235', + "because;": '\U00002235', + "bemptyv;": '\U000029B0', + "bepsi;": '\U000003F6', + "bernou;": '\U0000212C', + "beta;": '\U000003B2', + "beth;": '\U00002136', + "between;": '\U0000226C', + "bfr;": '\U0001D51F', + "bigcap;": '\U000022C2', + "bigcirc;": '\U000025EF', + "bigcup;": '\U000022C3', + "bigodot;": '\U00002A00', + "bigoplus;": '\U00002A01', + "bigotimes;": '\U00002A02', + "bigsqcup;": '\U00002A06', + "bigstar;": '\U00002605', + "bigtriangledown;": '\U000025BD', + "bigtriangleup;": '\U000025B3', + "biguplus;": '\U00002A04', + "bigvee;": '\U000022C1', + "bigwedge;": '\U000022C0', + "bkarow;": '\U0000290D', + "blacklozenge;": '\U000029EB', + "blacksquare;": '\U000025AA', + "blacktriangle;": '\U000025B4', + "blacktriangledown;": '\U000025BE', + "blacktriangleleft;": '\U000025C2', + "blacktriangleright;": '\U000025B8', + "blank;": '\U00002423', + "blk12;": '\U00002592', + "blk14;": '\U00002591', + "blk34;": '\U00002593', + "block;": '\U00002588', + "bnot;": '\U00002310', + "bopf;": '\U0001D553', + "bot;": '\U000022A5', + "bottom;": '\U000022A5', + "bowtie;": '\U000022C8', + "boxDL;": '\U00002557', + "boxDR;": '\U00002554', + "boxDl;": '\U00002556', + "boxDr;": '\U00002553', + "boxH;": '\U00002550', + "boxHD;": '\U00002566', + "boxHU;": '\U00002569', + "boxHd;": '\U00002564', + "boxHu;": '\U00002567', + "boxUL;": '\U0000255D', + "boxUR;": '\U0000255A', + "boxUl;": '\U0000255C', + "boxUr;": '\U00002559', + "boxV;": '\U00002551', + "boxVH;": '\U0000256C', + "boxVL;": '\U00002563', + "boxVR;": '\U00002560', + "boxVh;": '\U0000256B', + "boxVl;": '\U00002562', + "boxVr;": '\U0000255F', + "boxbox;": '\U000029C9', + "boxdL;": '\U00002555', + "boxdR;": '\U00002552', + "boxdl;": '\U00002510', + "boxdr;": '\U0000250C', + "boxh;": '\U00002500', + "boxhD;": '\U00002565', + "boxhU;": '\U00002568', + "boxhd;": '\U0000252C', + "boxhu;": '\U00002534', + "boxminus;": '\U0000229F', + "boxplus;": '\U0000229E', + "boxtimes;": '\U000022A0', + "boxuL;": '\U0000255B', + "boxuR;": '\U00002558', + "boxul;": '\U00002518', + "boxur;": '\U00002514', + "boxv;": '\U00002502', + "boxvH;": '\U0000256A', + "boxvL;": '\U00002561', + "boxvR;": '\U0000255E', + "boxvh;": '\U0000253C', + "boxvl;": 
'\U00002524', + "boxvr;": '\U0000251C', + "bprime;": '\U00002035', + "breve;": '\U000002D8', + "brvbar;": '\U000000A6', + "bscr;": '\U0001D4B7', + "bsemi;": '\U0000204F', + "bsim;": '\U0000223D', + "bsime;": '\U000022CD', + "bsol;": '\U0000005C', + "bsolb;": '\U000029C5', + "bsolhsub;": '\U000027C8', + "bull;": '\U00002022', + "bullet;": '\U00002022', + "bump;": '\U0000224E', + "bumpE;": '\U00002AAE', + "bumpe;": '\U0000224F', + "bumpeq;": '\U0000224F', + "cacute;": '\U00000107', + "cap;": '\U00002229', + "capand;": '\U00002A44', + "capbrcup;": '\U00002A49', + "capcap;": '\U00002A4B', + "capcup;": '\U00002A47', + "capdot;": '\U00002A40', + "caret;": '\U00002041', + "caron;": '\U000002C7', + "ccaps;": '\U00002A4D', + "ccaron;": '\U0000010D', + "ccedil;": '\U000000E7', + "ccirc;": '\U00000109', + "ccups;": '\U00002A4C', + "ccupssm;": '\U00002A50', + "cdot;": '\U0000010B', + "cedil;": '\U000000B8', + "cemptyv;": '\U000029B2', + "cent;": '\U000000A2', + "centerdot;": '\U000000B7', + "cfr;": '\U0001D520', + "chcy;": '\U00000447', + "check;": '\U00002713', + "checkmark;": '\U00002713', + "chi;": '\U000003C7', + "cir;": '\U000025CB', + "cirE;": '\U000029C3', + "circ;": '\U000002C6', + "circeq;": '\U00002257', + "circlearrowleft;": '\U000021BA', + "circlearrowright;": '\U000021BB', + "circledR;": '\U000000AE', + "circledS;": '\U000024C8', + "circledast;": '\U0000229B', + "circledcirc;": '\U0000229A', + "circleddash;": '\U0000229D', + "cire;": '\U00002257', + "cirfnint;": '\U00002A10', + "cirmid;": '\U00002AEF', + "cirscir;": '\U000029C2', + "clubs;": '\U00002663', + "clubsuit;": '\U00002663', + "colon;": '\U0000003A', + "colone;": '\U00002254', + "coloneq;": '\U00002254', + "comma;": '\U0000002C', + "commat;": '\U00000040', + "comp;": '\U00002201', + "compfn;": '\U00002218', + "complement;": '\U00002201', + "complexes;": '\U00002102', + "cong;": '\U00002245', + "congdot;": '\U00002A6D', + "conint;": '\U0000222E', + "copf;": '\U0001D554', + "coprod;": '\U00002210', + "copy;": '\U000000A9', + "copysr;": '\U00002117', + "crarr;": '\U000021B5', + "cross;": '\U00002717', + "cscr;": '\U0001D4B8', + "csub;": '\U00002ACF', + "csube;": '\U00002AD1', + "csup;": '\U00002AD0', + "csupe;": '\U00002AD2', + "ctdot;": '\U000022EF', + "cudarrl;": '\U00002938', + "cudarrr;": '\U00002935', + "cuepr;": '\U000022DE', + "cuesc;": '\U000022DF', + "cularr;": '\U000021B6', + "cularrp;": '\U0000293D', + "cup;": '\U0000222A', + "cupbrcap;": '\U00002A48', + "cupcap;": '\U00002A46', + "cupcup;": '\U00002A4A', + "cupdot;": '\U0000228D', + "cupor;": '\U00002A45', + "curarr;": '\U000021B7', + "curarrm;": '\U0000293C', + "curlyeqprec;": '\U000022DE', + "curlyeqsucc;": '\U000022DF', + "curlyvee;": '\U000022CE', + "curlywedge;": '\U000022CF', + "curren;": '\U000000A4', + "curvearrowleft;": '\U000021B6', + "curvearrowright;": '\U000021B7', + "cuvee;": '\U000022CE', + "cuwed;": '\U000022CF', + "cwconint;": '\U00002232', + "cwint;": '\U00002231', + "cylcty;": '\U0000232D', + "dArr;": '\U000021D3', + "dHar;": '\U00002965', + "dagger;": '\U00002020', + "daleth;": '\U00002138', + "darr;": '\U00002193', + "dash;": '\U00002010', + "dashv;": '\U000022A3', + "dbkarow;": '\U0000290F', + "dblac;": '\U000002DD', + "dcaron;": '\U0000010F', + "dcy;": '\U00000434', + "dd;": '\U00002146', + "ddagger;": '\U00002021', + "ddarr;": '\U000021CA', + "ddotseq;": '\U00002A77', + "deg;": '\U000000B0', + "delta;": '\U000003B4', + "demptyv;": '\U000029B1', + "dfisht;": '\U0000297F', + "dfr;": '\U0001D521', + "dharl;": '\U000021C3', + "dharr;": '\U000021C2', + 
"diam;": '\U000022C4', + "diamond;": '\U000022C4', + "diamondsuit;": '\U00002666', + "diams;": '\U00002666', + "die;": '\U000000A8', + "digamma;": '\U000003DD', + "disin;": '\U000022F2', + "div;": '\U000000F7', + "divide;": '\U000000F7', + "divideontimes;": '\U000022C7', + "divonx;": '\U000022C7', + "djcy;": '\U00000452', + "dlcorn;": '\U0000231E', + "dlcrop;": '\U0000230D', + "dollar;": '\U00000024', + "dopf;": '\U0001D555', + "dot;": '\U000002D9', + "doteq;": '\U00002250', + "doteqdot;": '\U00002251', + "dotminus;": '\U00002238', + "dotplus;": '\U00002214', + "dotsquare;": '\U000022A1', + "doublebarwedge;": '\U00002306', + "downarrow;": '\U00002193', + "downdownarrows;": '\U000021CA', + "downharpoonleft;": '\U000021C3', + "downharpoonright;": '\U000021C2', + "drbkarow;": '\U00002910', + "drcorn;": '\U0000231F', + "drcrop;": '\U0000230C', + "dscr;": '\U0001D4B9', + "dscy;": '\U00000455', + "dsol;": '\U000029F6', + "dstrok;": '\U00000111', + "dtdot;": '\U000022F1', + "dtri;": '\U000025BF', + "dtrif;": '\U000025BE', + "duarr;": '\U000021F5', + "duhar;": '\U0000296F', + "dwangle;": '\U000029A6', + "dzcy;": '\U0000045F', + "dzigrarr;": '\U000027FF', + "eDDot;": '\U00002A77', + "eDot;": '\U00002251', + "eacute;": '\U000000E9', + "easter;": '\U00002A6E', + "ecaron;": '\U0000011B', + "ecir;": '\U00002256', + "ecirc;": '\U000000EA', + "ecolon;": '\U00002255', + "ecy;": '\U0000044D', + "edot;": '\U00000117', + "ee;": '\U00002147', + "efDot;": '\U00002252', + "efr;": '\U0001D522', + "eg;": '\U00002A9A', + "egrave;": '\U000000E8', + "egs;": '\U00002A96', + "egsdot;": '\U00002A98', + "el;": '\U00002A99', + "elinters;": '\U000023E7', + "ell;": '\U00002113', + "els;": '\U00002A95', + "elsdot;": '\U00002A97', + "emacr;": '\U00000113', + "empty;": '\U00002205', + "emptyset;": '\U00002205', + "emptyv;": '\U00002205', + "emsp;": '\U00002003', + "emsp13;": '\U00002004', + "emsp14;": '\U00002005', + "eng;": '\U0000014B', + "ensp;": '\U00002002', + "eogon;": '\U00000119', + "eopf;": '\U0001D556', + "epar;": '\U000022D5', + "eparsl;": '\U000029E3', + "eplus;": '\U00002A71', + "epsi;": '\U000003B5', + "epsilon;": '\U000003B5', + "epsiv;": '\U000003F5', + "eqcirc;": '\U00002256', + "eqcolon;": '\U00002255', + "eqsim;": '\U00002242', + "eqslantgtr;": '\U00002A96', + "eqslantless;": '\U00002A95', + "equals;": '\U0000003D', + "equest;": '\U0000225F', + "equiv;": '\U00002261', + "equivDD;": '\U00002A78', + "eqvparsl;": '\U000029E5', + "erDot;": '\U00002253', + "erarr;": '\U00002971', + "escr;": '\U0000212F', + "esdot;": '\U00002250', + "esim;": '\U00002242', + "eta;": '\U000003B7', + "eth;": '\U000000F0', + "euml;": '\U000000EB', + "euro;": '\U000020AC', + "excl;": '\U00000021', + "exist;": '\U00002203', + "expectation;": '\U00002130', + "exponentiale;": '\U00002147', + "fallingdotseq;": '\U00002252', + "fcy;": '\U00000444', + "female;": '\U00002640', + "ffilig;": '\U0000FB03', + "fflig;": '\U0000FB00', + "ffllig;": '\U0000FB04', + "ffr;": '\U0001D523', + "filig;": '\U0000FB01', + "flat;": '\U0000266D', + "fllig;": '\U0000FB02', + "fltns;": '\U000025B1', + "fnof;": '\U00000192', + "fopf;": '\U0001D557', + "forall;": '\U00002200', + "fork;": '\U000022D4', + "forkv;": '\U00002AD9', + "fpartint;": '\U00002A0D', + "frac12;": '\U000000BD', + "frac13;": '\U00002153', + "frac14;": '\U000000BC', + "frac15;": '\U00002155', + "frac16;": '\U00002159', + "frac18;": '\U0000215B', + "frac23;": '\U00002154', + "frac25;": '\U00002156', + "frac34;": '\U000000BE', + "frac35;": '\U00002157', + "frac38;": '\U0000215C', + "frac45;": 
'\U00002158', + "frac56;": '\U0000215A', + "frac58;": '\U0000215D', + "frac78;": '\U0000215E', + "frasl;": '\U00002044', + "frown;": '\U00002322', + "fscr;": '\U0001D4BB', + "gE;": '\U00002267', + "gEl;": '\U00002A8C', + "gacute;": '\U000001F5', + "gamma;": '\U000003B3', + "gammad;": '\U000003DD', + "gap;": '\U00002A86', + "gbreve;": '\U0000011F', + "gcirc;": '\U0000011D', + "gcy;": '\U00000433', + "gdot;": '\U00000121', + "ge;": '\U00002265', + "gel;": '\U000022DB', + "geq;": '\U00002265', + "geqq;": '\U00002267', + "geqslant;": '\U00002A7E', + "ges;": '\U00002A7E', + "gescc;": '\U00002AA9', + "gesdot;": '\U00002A80', + "gesdoto;": '\U00002A82', + "gesdotol;": '\U00002A84', + "gesles;": '\U00002A94', + "gfr;": '\U0001D524', + "gg;": '\U0000226B', + "ggg;": '\U000022D9', + "gimel;": '\U00002137', + "gjcy;": '\U00000453', + "gl;": '\U00002277', + "glE;": '\U00002A92', + "gla;": '\U00002AA5', + "glj;": '\U00002AA4', + "gnE;": '\U00002269', + "gnap;": '\U00002A8A', + "gnapprox;": '\U00002A8A', + "gne;": '\U00002A88', + "gneq;": '\U00002A88', + "gneqq;": '\U00002269', + "gnsim;": '\U000022E7', + "gopf;": '\U0001D558', + "grave;": '\U00000060', + "gscr;": '\U0000210A', + "gsim;": '\U00002273', + "gsime;": '\U00002A8E', + "gsiml;": '\U00002A90', + "gt;": '\U0000003E', + "gtcc;": '\U00002AA7', + "gtcir;": '\U00002A7A', + "gtdot;": '\U000022D7', + "gtlPar;": '\U00002995', + "gtquest;": '\U00002A7C', + "gtrapprox;": '\U00002A86', + "gtrarr;": '\U00002978', + "gtrdot;": '\U000022D7', + "gtreqless;": '\U000022DB', + "gtreqqless;": '\U00002A8C', + "gtrless;": '\U00002277', + "gtrsim;": '\U00002273', + "hArr;": '\U000021D4', + "hairsp;": '\U0000200A', + "half;": '\U000000BD', + "hamilt;": '\U0000210B', + "hardcy;": '\U0000044A', + "harr;": '\U00002194', + "harrcir;": '\U00002948', + "harrw;": '\U000021AD', + "hbar;": '\U0000210F', + "hcirc;": '\U00000125', + "hearts;": '\U00002665', + "heartsuit;": '\U00002665', + "hellip;": '\U00002026', + "hercon;": '\U000022B9', + "hfr;": '\U0001D525', + "hksearow;": '\U00002925', + "hkswarow;": '\U00002926', + "hoarr;": '\U000021FF', + "homtht;": '\U0000223B', + "hookleftarrow;": '\U000021A9', + "hookrightarrow;": '\U000021AA', + "hopf;": '\U0001D559', + "horbar;": '\U00002015', + "hscr;": '\U0001D4BD', + "hslash;": '\U0000210F', + "hstrok;": '\U00000127', + "hybull;": '\U00002043', + "hyphen;": '\U00002010', + "iacute;": '\U000000ED', + "ic;": '\U00002063', + "icirc;": '\U000000EE', + "icy;": '\U00000438', + "iecy;": '\U00000435', + "iexcl;": '\U000000A1', + "iff;": '\U000021D4', + "ifr;": '\U0001D526', + "igrave;": '\U000000EC', + "ii;": '\U00002148', + "iiiint;": '\U00002A0C', + "iiint;": '\U0000222D', + "iinfin;": '\U000029DC', + "iiota;": '\U00002129', + "ijlig;": '\U00000133', + "imacr;": '\U0000012B', + "image;": '\U00002111', + "imagline;": '\U00002110', + "imagpart;": '\U00002111', + "imath;": '\U00000131', + "imof;": '\U000022B7', + "imped;": '\U000001B5', + "in;": '\U00002208', + "incare;": '\U00002105', + "infin;": '\U0000221E', + "infintie;": '\U000029DD', + "inodot;": '\U00000131', + "int;": '\U0000222B', + "intcal;": '\U000022BA', + "integers;": '\U00002124', + "intercal;": '\U000022BA', + "intlarhk;": '\U00002A17', + "intprod;": '\U00002A3C', + "iocy;": '\U00000451', + "iogon;": '\U0000012F', + "iopf;": '\U0001D55A', + "iota;": '\U000003B9', + "iprod;": '\U00002A3C', + "iquest;": '\U000000BF', + "iscr;": '\U0001D4BE', + "isin;": '\U00002208', + "isinE;": '\U000022F9', + "isindot;": '\U000022F5', + "isins;": '\U000022F4', + "isinsv;": '\U000022F3', + 
"isinv;": '\U00002208', + "it;": '\U00002062', + "itilde;": '\U00000129', + "iukcy;": '\U00000456', + "iuml;": '\U000000EF', + "jcirc;": '\U00000135', + "jcy;": '\U00000439', + "jfr;": '\U0001D527', + "jmath;": '\U00000237', + "jopf;": '\U0001D55B', + "jscr;": '\U0001D4BF', + "jsercy;": '\U00000458', + "jukcy;": '\U00000454', + "kappa;": '\U000003BA', + "kappav;": '\U000003F0', + "kcedil;": '\U00000137', + "kcy;": '\U0000043A', + "kfr;": '\U0001D528', + "kgreen;": '\U00000138', + "khcy;": '\U00000445', + "kjcy;": '\U0000045C', + "kopf;": '\U0001D55C', + "kscr;": '\U0001D4C0', + "lAarr;": '\U000021DA', + "lArr;": '\U000021D0', + "lAtail;": '\U0000291B', + "lBarr;": '\U0000290E', + "lE;": '\U00002266', + "lEg;": '\U00002A8B', + "lHar;": '\U00002962', + "lacute;": '\U0000013A', + "laemptyv;": '\U000029B4', + "lagran;": '\U00002112', + "lambda;": '\U000003BB', + "lang;": '\U000027E8', + "langd;": '\U00002991', + "langle;": '\U000027E8', + "lap;": '\U00002A85', + "laquo;": '\U000000AB', + "larr;": '\U00002190', + "larrb;": '\U000021E4', + "larrbfs;": '\U0000291F', + "larrfs;": '\U0000291D', + "larrhk;": '\U000021A9', + "larrlp;": '\U000021AB', + "larrpl;": '\U00002939', + "larrsim;": '\U00002973', + "larrtl;": '\U000021A2', + "lat;": '\U00002AAB', + "latail;": '\U00002919', + "late;": '\U00002AAD', + "lbarr;": '\U0000290C', + "lbbrk;": '\U00002772', + "lbrace;": '\U0000007B', + "lbrack;": '\U0000005B', + "lbrke;": '\U0000298B', + "lbrksld;": '\U0000298F', + "lbrkslu;": '\U0000298D', + "lcaron;": '\U0000013E', + "lcedil;": '\U0000013C', + "lceil;": '\U00002308', + "lcub;": '\U0000007B', + "lcy;": '\U0000043B', + "ldca;": '\U00002936', + "ldquo;": '\U0000201C', + "ldquor;": '\U0000201E', + "ldrdhar;": '\U00002967', + "ldrushar;": '\U0000294B', + "ldsh;": '\U000021B2', + "le;": '\U00002264', + "leftarrow;": '\U00002190', + "leftarrowtail;": '\U000021A2', + "leftharpoondown;": '\U000021BD', + "leftharpoonup;": '\U000021BC', + "leftleftarrows;": '\U000021C7', + "leftrightarrow;": '\U00002194', + "leftrightarrows;": '\U000021C6', + "leftrightharpoons;": '\U000021CB', + "leftrightsquigarrow;": '\U000021AD', + "leftthreetimes;": '\U000022CB', + "leg;": '\U000022DA', + "leq;": '\U00002264', + "leqq;": '\U00002266', + "leqslant;": '\U00002A7D', + "les;": '\U00002A7D', + "lescc;": '\U00002AA8', + "lesdot;": '\U00002A7F', + "lesdoto;": '\U00002A81', + "lesdotor;": '\U00002A83', + "lesges;": '\U00002A93', + "lessapprox;": '\U00002A85', + "lessdot;": '\U000022D6', + "lesseqgtr;": '\U000022DA', + "lesseqqgtr;": '\U00002A8B', + "lessgtr;": '\U00002276', + "lesssim;": '\U00002272', + "lfisht;": '\U0000297C', + "lfloor;": '\U0000230A', + "lfr;": '\U0001D529', + "lg;": '\U00002276', + "lgE;": '\U00002A91', + "lhard;": '\U000021BD', + "lharu;": '\U000021BC', + "lharul;": '\U0000296A', + "lhblk;": '\U00002584', + "ljcy;": '\U00000459', + "ll;": '\U0000226A', + "llarr;": '\U000021C7', + "llcorner;": '\U0000231E', + "llhard;": '\U0000296B', + "lltri;": '\U000025FA', + "lmidot;": '\U00000140', + "lmoust;": '\U000023B0', + "lmoustache;": '\U000023B0', + "lnE;": '\U00002268', + "lnap;": '\U00002A89', + "lnapprox;": '\U00002A89', + "lne;": '\U00002A87', + "lneq;": '\U00002A87', + "lneqq;": '\U00002268', + "lnsim;": '\U000022E6', + "loang;": '\U000027EC', + "loarr;": '\U000021FD', + "lobrk;": '\U000027E6', + "longleftarrow;": '\U000027F5', + "longleftrightarrow;": '\U000027F7', + "longmapsto;": '\U000027FC', + "longrightarrow;": '\U000027F6', + "looparrowleft;": '\U000021AB', + "looparrowright;": '\U000021AC', + "lopar;": 
'\U00002985', + "lopf;": '\U0001D55D', + "loplus;": '\U00002A2D', + "lotimes;": '\U00002A34', + "lowast;": '\U00002217', + "lowbar;": '\U0000005F', + "loz;": '\U000025CA', + "lozenge;": '\U000025CA', + "lozf;": '\U000029EB', + "lpar;": '\U00000028', + "lparlt;": '\U00002993', + "lrarr;": '\U000021C6', + "lrcorner;": '\U0000231F', + "lrhar;": '\U000021CB', + "lrhard;": '\U0000296D', + "lrm;": '\U0000200E', + "lrtri;": '\U000022BF', + "lsaquo;": '\U00002039', + "lscr;": '\U0001D4C1', + "lsh;": '\U000021B0', + "lsim;": '\U00002272', + "lsime;": '\U00002A8D', + "lsimg;": '\U00002A8F', + "lsqb;": '\U0000005B', + "lsquo;": '\U00002018', + "lsquor;": '\U0000201A', + "lstrok;": '\U00000142', + "lt;": '\U0000003C', + "ltcc;": '\U00002AA6', + "ltcir;": '\U00002A79', + "ltdot;": '\U000022D6', + "lthree;": '\U000022CB', + "ltimes;": '\U000022C9', + "ltlarr;": '\U00002976', + "ltquest;": '\U00002A7B', + "ltrPar;": '\U00002996', + "ltri;": '\U000025C3', + "ltrie;": '\U000022B4', + "ltrif;": '\U000025C2', + "lurdshar;": '\U0000294A', + "luruhar;": '\U00002966', + "mDDot;": '\U0000223A', + "macr;": '\U000000AF', + "male;": '\U00002642', + "malt;": '\U00002720', + "maltese;": '\U00002720', + "map;": '\U000021A6', + "mapsto;": '\U000021A6', + "mapstodown;": '\U000021A7', + "mapstoleft;": '\U000021A4', + "mapstoup;": '\U000021A5', + "marker;": '\U000025AE', + "mcomma;": '\U00002A29', + "mcy;": '\U0000043C', + "mdash;": '\U00002014', + "measuredangle;": '\U00002221', + "mfr;": '\U0001D52A', + "mho;": '\U00002127', + "micro;": '\U000000B5', + "mid;": '\U00002223', + "midast;": '\U0000002A', + "midcir;": '\U00002AF0', + "middot;": '\U000000B7', + "minus;": '\U00002212', + "minusb;": '\U0000229F', + "minusd;": '\U00002238', + "minusdu;": '\U00002A2A', + "mlcp;": '\U00002ADB', + "mldr;": '\U00002026', + "mnplus;": '\U00002213', + "models;": '\U000022A7', + "mopf;": '\U0001D55E', + "mp;": '\U00002213', + "mscr;": '\U0001D4C2', + "mstpos;": '\U0000223E', + "mu;": '\U000003BC', + "multimap;": '\U000022B8', + "mumap;": '\U000022B8', + "nLeftarrow;": '\U000021CD', + "nLeftrightarrow;": '\U000021CE', + "nRightarrow;": '\U000021CF', + "nVDash;": '\U000022AF', + "nVdash;": '\U000022AE', + "nabla;": '\U00002207', + "nacute;": '\U00000144', + "nap;": '\U00002249', + "napos;": '\U00000149', + "napprox;": '\U00002249', + "natur;": '\U0000266E', + "natural;": '\U0000266E', + "naturals;": '\U00002115', + "nbsp;": '\U000000A0', + "ncap;": '\U00002A43', + "ncaron;": '\U00000148', + "ncedil;": '\U00000146', + "ncong;": '\U00002247', + "ncup;": '\U00002A42', + "ncy;": '\U0000043D', + "ndash;": '\U00002013', + "ne;": '\U00002260', + "neArr;": '\U000021D7', + "nearhk;": '\U00002924', + "nearr;": '\U00002197', + "nearrow;": '\U00002197', + "nequiv;": '\U00002262', + "nesear;": '\U00002928', + "nexist;": '\U00002204', + "nexists;": '\U00002204', + "nfr;": '\U0001D52B', + "nge;": '\U00002271', + "ngeq;": '\U00002271', + "ngsim;": '\U00002275', + "ngt;": '\U0000226F', + "ngtr;": '\U0000226F', + "nhArr;": '\U000021CE', + "nharr;": '\U000021AE', + "nhpar;": '\U00002AF2', + "ni;": '\U0000220B', + "nis;": '\U000022FC', + "nisd;": '\U000022FA', + "niv;": '\U0000220B', + "njcy;": '\U0000045A', + "nlArr;": '\U000021CD', + "nlarr;": '\U0000219A', + "nldr;": '\U00002025', + "nle;": '\U00002270', + "nleftarrow;": '\U0000219A', + "nleftrightarrow;": '\U000021AE', + "nleq;": '\U00002270', + "nless;": '\U0000226E', + "nlsim;": '\U00002274', + "nlt;": '\U0000226E', + "nltri;": '\U000022EA', + "nltrie;": '\U000022EC', + "nmid;": '\U00002224', + 
"nopf;": '\U0001D55F', + "not;": '\U000000AC', + "notin;": '\U00002209', + "notinva;": '\U00002209', + "notinvb;": '\U000022F7', + "notinvc;": '\U000022F6', + "notni;": '\U0000220C', + "notniva;": '\U0000220C', + "notnivb;": '\U000022FE', + "notnivc;": '\U000022FD', + "npar;": '\U00002226', + "nparallel;": '\U00002226', + "npolint;": '\U00002A14', + "npr;": '\U00002280', + "nprcue;": '\U000022E0', + "nprec;": '\U00002280', + "nrArr;": '\U000021CF', + "nrarr;": '\U0000219B', + "nrightarrow;": '\U0000219B', + "nrtri;": '\U000022EB', + "nrtrie;": '\U000022ED', + "nsc;": '\U00002281', + "nsccue;": '\U000022E1', + "nscr;": '\U0001D4C3', + "nshortmid;": '\U00002224', + "nshortparallel;": '\U00002226', + "nsim;": '\U00002241', + "nsime;": '\U00002244', + "nsimeq;": '\U00002244', + "nsmid;": '\U00002224', + "nspar;": '\U00002226', + "nsqsube;": '\U000022E2', + "nsqsupe;": '\U000022E3', + "nsub;": '\U00002284', + "nsube;": '\U00002288', + "nsubseteq;": '\U00002288', + "nsucc;": '\U00002281', + "nsup;": '\U00002285', + "nsupe;": '\U00002289', + "nsupseteq;": '\U00002289', + "ntgl;": '\U00002279', + "ntilde;": '\U000000F1', + "ntlg;": '\U00002278', + "ntriangleleft;": '\U000022EA', + "ntrianglelefteq;": '\U000022EC', + "ntriangleright;": '\U000022EB', + "ntrianglerighteq;": '\U000022ED', + "nu;": '\U000003BD', + "num;": '\U00000023', + "numero;": '\U00002116', + "numsp;": '\U00002007', + "nvDash;": '\U000022AD', + "nvHarr;": '\U00002904', + "nvdash;": '\U000022AC', + "nvinfin;": '\U000029DE', + "nvlArr;": '\U00002902', + "nvrArr;": '\U00002903', + "nwArr;": '\U000021D6', + "nwarhk;": '\U00002923', + "nwarr;": '\U00002196', + "nwarrow;": '\U00002196', + "nwnear;": '\U00002927', + "oS;": '\U000024C8', + "oacute;": '\U000000F3', + "oast;": '\U0000229B', + "ocir;": '\U0000229A', + "ocirc;": '\U000000F4', + "ocy;": '\U0000043E', + "odash;": '\U0000229D', + "odblac;": '\U00000151', + "odiv;": '\U00002A38', + "odot;": '\U00002299', + "odsold;": '\U000029BC', + "oelig;": '\U00000153', + "ofcir;": '\U000029BF', + "ofr;": '\U0001D52C', + "ogon;": '\U000002DB', + "ograve;": '\U000000F2', + "ogt;": '\U000029C1', + "ohbar;": '\U000029B5', + "ohm;": '\U000003A9', + "oint;": '\U0000222E', + "olarr;": '\U000021BA', + "olcir;": '\U000029BE', + "olcross;": '\U000029BB', + "oline;": '\U0000203E', + "olt;": '\U000029C0', + "omacr;": '\U0000014D', + "omega;": '\U000003C9', + "omicron;": '\U000003BF', + "omid;": '\U000029B6', + "ominus;": '\U00002296', + "oopf;": '\U0001D560', + "opar;": '\U000029B7', + "operp;": '\U000029B9', + "oplus;": '\U00002295', + "or;": '\U00002228', + "orarr;": '\U000021BB', + "ord;": '\U00002A5D', + "order;": '\U00002134', + "orderof;": '\U00002134', + "ordf;": '\U000000AA', + "ordm;": '\U000000BA', + "origof;": '\U000022B6', + "oror;": '\U00002A56', + "orslope;": '\U00002A57', + "orv;": '\U00002A5B', + "oscr;": '\U00002134', + "oslash;": '\U000000F8', + "osol;": '\U00002298', + "otilde;": '\U000000F5', + "otimes;": '\U00002297', + "otimesas;": '\U00002A36', + "ouml;": '\U000000F6', + "ovbar;": '\U0000233D', + "par;": '\U00002225', + "para;": '\U000000B6', + "parallel;": '\U00002225', + "parsim;": '\U00002AF3', + "parsl;": '\U00002AFD', + "part;": '\U00002202', + "pcy;": '\U0000043F', + "percnt;": '\U00000025', + "period;": '\U0000002E', + "permil;": '\U00002030', + "perp;": '\U000022A5', + "pertenk;": '\U00002031', + "pfr;": '\U0001D52D', + "phi;": '\U000003C6', + "phiv;": '\U000003D5', + "phmmat;": '\U00002133', + "phone;": '\U0000260E', + "pi;": '\U000003C0', + "pitchfork;": '\U000022D4', + 
"piv;": '\U000003D6', + "planck;": '\U0000210F', + "planckh;": '\U0000210E', + "plankv;": '\U0000210F', + "plus;": '\U0000002B', + "plusacir;": '\U00002A23', + "plusb;": '\U0000229E', + "pluscir;": '\U00002A22', + "plusdo;": '\U00002214', + "plusdu;": '\U00002A25', + "pluse;": '\U00002A72', + "plusmn;": '\U000000B1', + "plussim;": '\U00002A26', + "plustwo;": '\U00002A27', + "pm;": '\U000000B1', + "pointint;": '\U00002A15', + "popf;": '\U0001D561', + "pound;": '\U000000A3', + "pr;": '\U0000227A', + "prE;": '\U00002AB3', + "prap;": '\U00002AB7', + "prcue;": '\U0000227C', + "pre;": '\U00002AAF', + "prec;": '\U0000227A', + "precapprox;": '\U00002AB7', + "preccurlyeq;": '\U0000227C', + "preceq;": '\U00002AAF', + "precnapprox;": '\U00002AB9', + "precneqq;": '\U00002AB5', + "precnsim;": '\U000022E8', + "precsim;": '\U0000227E', + "prime;": '\U00002032', + "primes;": '\U00002119', + "prnE;": '\U00002AB5', + "prnap;": '\U00002AB9', + "prnsim;": '\U000022E8', + "prod;": '\U0000220F', + "profalar;": '\U0000232E', + "profline;": '\U00002312', + "profsurf;": '\U00002313', + "prop;": '\U0000221D', + "propto;": '\U0000221D', + "prsim;": '\U0000227E', + "prurel;": '\U000022B0', + "pscr;": '\U0001D4C5', + "psi;": '\U000003C8', + "puncsp;": '\U00002008', + "qfr;": '\U0001D52E', + "qint;": '\U00002A0C', + "qopf;": '\U0001D562', + "qprime;": '\U00002057', + "qscr;": '\U0001D4C6', + "quaternions;": '\U0000210D', + "quatint;": '\U00002A16', + "quest;": '\U0000003F', + "questeq;": '\U0000225F', + "quot;": '\U00000022', + "rAarr;": '\U000021DB', + "rArr;": '\U000021D2', + "rAtail;": '\U0000291C', + "rBarr;": '\U0000290F', + "rHar;": '\U00002964', + "racute;": '\U00000155', + "radic;": '\U0000221A', + "raemptyv;": '\U000029B3', + "rang;": '\U000027E9', + "rangd;": '\U00002992', + "range;": '\U000029A5', + "rangle;": '\U000027E9', + "raquo;": '\U000000BB', + "rarr;": '\U00002192', + "rarrap;": '\U00002975', + "rarrb;": '\U000021E5', + "rarrbfs;": '\U00002920', + "rarrc;": '\U00002933', + "rarrfs;": '\U0000291E', + "rarrhk;": '\U000021AA', + "rarrlp;": '\U000021AC', + "rarrpl;": '\U00002945', + "rarrsim;": '\U00002974', + "rarrtl;": '\U000021A3', + "rarrw;": '\U0000219D', + "ratail;": '\U0000291A', + "ratio;": '\U00002236', + "rationals;": '\U0000211A', + "rbarr;": '\U0000290D', + "rbbrk;": '\U00002773', + "rbrace;": '\U0000007D', + "rbrack;": '\U0000005D', + "rbrke;": '\U0000298C', + "rbrksld;": '\U0000298E', + "rbrkslu;": '\U00002990', + "rcaron;": '\U00000159', + "rcedil;": '\U00000157', + "rceil;": '\U00002309', + "rcub;": '\U0000007D', + "rcy;": '\U00000440', + "rdca;": '\U00002937', + "rdldhar;": '\U00002969', + "rdquo;": '\U0000201D', + "rdquor;": '\U0000201D', + "rdsh;": '\U000021B3', + "real;": '\U0000211C', + "realine;": '\U0000211B', + "realpart;": '\U0000211C', + "reals;": '\U0000211D', + "rect;": '\U000025AD', + "reg;": '\U000000AE', + "rfisht;": '\U0000297D', + "rfloor;": '\U0000230B', + "rfr;": '\U0001D52F', + "rhard;": '\U000021C1', + "rharu;": '\U000021C0', + "rharul;": '\U0000296C', + "rho;": '\U000003C1', + "rhov;": '\U000003F1', + "rightarrow;": '\U00002192', + "rightarrowtail;": '\U000021A3', + "rightharpoondown;": '\U000021C1', + "rightharpoonup;": '\U000021C0', + "rightleftarrows;": '\U000021C4', + "rightleftharpoons;": '\U000021CC', + "rightrightarrows;": '\U000021C9', + "rightsquigarrow;": '\U0000219D', + "rightthreetimes;": '\U000022CC', + "ring;": '\U000002DA', + "risingdotseq;": '\U00002253', + "rlarr;": '\U000021C4', + "rlhar;": '\U000021CC', + "rlm;": '\U0000200F', + "rmoust;": 
'\U000023B1', + "rmoustache;": '\U000023B1', + "rnmid;": '\U00002AEE', + "roang;": '\U000027ED', + "roarr;": '\U000021FE', + "robrk;": '\U000027E7', + "ropar;": '\U00002986', + "ropf;": '\U0001D563', + "roplus;": '\U00002A2E', + "rotimes;": '\U00002A35', + "rpar;": '\U00000029', + "rpargt;": '\U00002994', + "rppolint;": '\U00002A12', + "rrarr;": '\U000021C9', + "rsaquo;": '\U0000203A', + "rscr;": '\U0001D4C7', + "rsh;": '\U000021B1', + "rsqb;": '\U0000005D', + "rsquo;": '\U00002019', + "rsquor;": '\U00002019', + "rthree;": '\U000022CC', + "rtimes;": '\U000022CA', + "rtri;": '\U000025B9', + "rtrie;": '\U000022B5', + "rtrif;": '\U000025B8', + "rtriltri;": '\U000029CE', + "ruluhar;": '\U00002968', + "rx;": '\U0000211E', + "sacute;": '\U0000015B', + "sbquo;": '\U0000201A', + "sc;": '\U0000227B', + "scE;": '\U00002AB4', + "scap;": '\U00002AB8', + "scaron;": '\U00000161', + "sccue;": '\U0000227D', + "sce;": '\U00002AB0', + "scedil;": '\U0000015F', + "scirc;": '\U0000015D', + "scnE;": '\U00002AB6', + "scnap;": '\U00002ABA', + "scnsim;": '\U000022E9', + "scpolint;": '\U00002A13', + "scsim;": '\U0000227F', + "scy;": '\U00000441', + "sdot;": '\U000022C5', + "sdotb;": '\U000022A1', + "sdote;": '\U00002A66', + "seArr;": '\U000021D8', + "searhk;": '\U00002925', + "searr;": '\U00002198', + "searrow;": '\U00002198', + "sect;": '\U000000A7', + "semi;": '\U0000003B', + "seswar;": '\U00002929', + "setminus;": '\U00002216', + "setmn;": '\U00002216', + "sext;": '\U00002736', + "sfr;": '\U0001D530', + "sfrown;": '\U00002322', + "sharp;": '\U0000266F', + "shchcy;": '\U00000449', + "shcy;": '\U00000448', + "shortmid;": '\U00002223', + "shortparallel;": '\U00002225', + "shy;": '\U000000AD', + "sigma;": '\U000003C3', + "sigmaf;": '\U000003C2', + "sigmav;": '\U000003C2', + "sim;": '\U0000223C', + "simdot;": '\U00002A6A', + "sime;": '\U00002243', + "simeq;": '\U00002243', + "simg;": '\U00002A9E', + "simgE;": '\U00002AA0', + "siml;": '\U00002A9D', + "simlE;": '\U00002A9F', + "simne;": '\U00002246', + "simplus;": '\U00002A24', + "simrarr;": '\U00002972', + "slarr;": '\U00002190', + "smallsetminus;": '\U00002216', + "smashp;": '\U00002A33', + "smeparsl;": '\U000029E4', + "smid;": '\U00002223', + "smile;": '\U00002323', + "smt;": '\U00002AAA', + "smte;": '\U00002AAC', + "softcy;": '\U0000044C', + "sol;": '\U0000002F', + "solb;": '\U000029C4', + "solbar;": '\U0000233F', + "sopf;": '\U0001D564', + "spades;": '\U00002660', + "spadesuit;": '\U00002660', + "spar;": '\U00002225', + "sqcap;": '\U00002293', + "sqcup;": '\U00002294', + "sqsub;": '\U0000228F', + "sqsube;": '\U00002291', + "sqsubset;": '\U0000228F', + "sqsubseteq;": '\U00002291', + "sqsup;": '\U00002290', + "sqsupe;": '\U00002292', + "sqsupset;": '\U00002290', + "sqsupseteq;": '\U00002292', + "squ;": '\U000025A1', + "square;": '\U000025A1', + "squarf;": '\U000025AA', + "squf;": '\U000025AA', + "srarr;": '\U00002192', + "sscr;": '\U0001D4C8', + "ssetmn;": '\U00002216', + "ssmile;": '\U00002323', + "sstarf;": '\U000022C6', + "star;": '\U00002606', + "starf;": '\U00002605', + "straightepsilon;": '\U000003F5', + "straightphi;": '\U000003D5', + "strns;": '\U000000AF', + "sub;": '\U00002282', + "subE;": '\U00002AC5', + "subdot;": '\U00002ABD', + "sube;": '\U00002286', + "subedot;": '\U00002AC3', + "submult;": '\U00002AC1', + "subnE;": '\U00002ACB', + "subne;": '\U0000228A', + "subplus;": '\U00002ABF', + "subrarr;": '\U00002979', + "subset;": '\U00002282', + "subseteq;": '\U00002286', + "subseteqq;": '\U00002AC5', + "subsetneq;": '\U0000228A', + "subsetneqq;": 
'\U00002ACB', + "subsim;": '\U00002AC7', + "subsub;": '\U00002AD5', + "subsup;": '\U00002AD3', + "succ;": '\U0000227B', + "succapprox;": '\U00002AB8', + "succcurlyeq;": '\U0000227D', + "succeq;": '\U00002AB0', + "succnapprox;": '\U00002ABA', + "succneqq;": '\U00002AB6', + "succnsim;": '\U000022E9', + "succsim;": '\U0000227F', + "sum;": '\U00002211', + "sung;": '\U0000266A', + "sup;": '\U00002283', + "sup1;": '\U000000B9', + "sup2;": '\U000000B2', + "sup3;": '\U000000B3', + "supE;": '\U00002AC6', + "supdot;": '\U00002ABE', + "supdsub;": '\U00002AD8', + "supe;": '\U00002287', + "supedot;": '\U00002AC4', + "suphsol;": '\U000027C9', + "suphsub;": '\U00002AD7', + "suplarr;": '\U0000297B', + "supmult;": '\U00002AC2', + "supnE;": '\U00002ACC', + "supne;": '\U0000228B', + "supplus;": '\U00002AC0', + "supset;": '\U00002283', + "supseteq;": '\U00002287', + "supseteqq;": '\U00002AC6', + "supsetneq;": '\U0000228B', + "supsetneqq;": '\U00002ACC', + "supsim;": '\U00002AC8', + "supsub;": '\U00002AD4', + "supsup;": '\U00002AD6', + "swArr;": '\U000021D9', + "swarhk;": '\U00002926', + "swarr;": '\U00002199', + "swarrow;": '\U00002199', + "swnwar;": '\U0000292A', + "szlig;": '\U000000DF', + "target;": '\U00002316', + "tau;": '\U000003C4', + "tbrk;": '\U000023B4', + "tcaron;": '\U00000165', + "tcedil;": '\U00000163', + "tcy;": '\U00000442', + "tdot;": '\U000020DB', + "telrec;": '\U00002315', + "tfr;": '\U0001D531', + "there4;": '\U00002234', + "therefore;": '\U00002234', + "theta;": '\U000003B8', + "thetasym;": '\U000003D1', + "thetav;": '\U000003D1', + "thickapprox;": '\U00002248', + "thicksim;": '\U0000223C', + "thinsp;": '\U00002009', + "thkap;": '\U00002248', + "thksim;": '\U0000223C', + "thorn;": '\U000000FE', + "tilde;": '\U000002DC', + "times;": '\U000000D7', + "timesb;": '\U000022A0', + "timesbar;": '\U00002A31', + "timesd;": '\U00002A30', + "tint;": '\U0000222D', + "toea;": '\U00002928', + "top;": '\U000022A4', + "topbot;": '\U00002336', + "topcir;": '\U00002AF1', + "topf;": '\U0001D565', + "topfork;": '\U00002ADA', + "tosa;": '\U00002929', + "tprime;": '\U00002034', + "trade;": '\U00002122', + "triangle;": '\U000025B5', + "triangledown;": '\U000025BF', + "triangleleft;": '\U000025C3', + "trianglelefteq;": '\U000022B4', + "triangleq;": '\U0000225C', + "triangleright;": '\U000025B9', + "trianglerighteq;": '\U000022B5', + "tridot;": '\U000025EC', + "trie;": '\U0000225C', + "triminus;": '\U00002A3A', + "triplus;": '\U00002A39', + "trisb;": '\U000029CD', + "tritime;": '\U00002A3B', + "trpezium;": '\U000023E2', + "tscr;": '\U0001D4C9', + "tscy;": '\U00000446', + "tshcy;": '\U0000045B', + "tstrok;": '\U00000167', + "twixt;": '\U0000226C', + "twoheadleftarrow;": '\U0000219E', + "twoheadrightarrow;": '\U000021A0', + "uArr;": '\U000021D1', + "uHar;": '\U00002963', + "uacute;": '\U000000FA', + "uarr;": '\U00002191', + "ubrcy;": '\U0000045E', + "ubreve;": '\U0000016D', + "ucirc;": '\U000000FB', + "ucy;": '\U00000443', + "udarr;": '\U000021C5', + "udblac;": '\U00000171', + "udhar;": '\U0000296E', + "ufisht;": '\U0000297E', + "ufr;": '\U0001D532', + "ugrave;": '\U000000F9', + "uharl;": '\U000021BF', + "uharr;": '\U000021BE', + "uhblk;": '\U00002580', + "ulcorn;": '\U0000231C', + "ulcorner;": '\U0000231C', + "ulcrop;": '\U0000230F', + "ultri;": '\U000025F8', + "umacr;": '\U0000016B', + "uml;": '\U000000A8', + "uogon;": '\U00000173', + "uopf;": '\U0001D566', + "uparrow;": '\U00002191', + "updownarrow;": '\U00002195', + "upharpoonleft;": '\U000021BF', + "upharpoonright;": '\U000021BE', + "uplus;": '\U0000228E', + 
"upsi;": '\U000003C5', + "upsih;": '\U000003D2', + "upsilon;": '\U000003C5', + "upuparrows;": '\U000021C8', + "urcorn;": '\U0000231D', + "urcorner;": '\U0000231D', + "urcrop;": '\U0000230E', + "uring;": '\U0000016F', + "urtri;": '\U000025F9', + "uscr;": '\U0001D4CA', + "utdot;": '\U000022F0', + "utilde;": '\U00000169', + "utri;": '\U000025B5', + "utrif;": '\U000025B4', + "uuarr;": '\U000021C8', + "uuml;": '\U000000FC', + "uwangle;": '\U000029A7', + "vArr;": '\U000021D5', + "vBar;": '\U00002AE8', + "vBarv;": '\U00002AE9', + "vDash;": '\U000022A8', + "vangrt;": '\U0000299C', + "varepsilon;": '\U000003F5', + "varkappa;": '\U000003F0', + "varnothing;": '\U00002205', + "varphi;": '\U000003D5', + "varpi;": '\U000003D6', + "varpropto;": '\U0000221D', + "varr;": '\U00002195', + "varrho;": '\U000003F1', + "varsigma;": '\U000003C2', + "vartheta;": '\U000003D1', + "vartriangleleft;": '\U000022B2', + "vartriangleright;": '\U000022B3', + "vcy;": '\U00000432', + "vdash;": '\U000022A2', + "vee;": '\U00002228', + "veebar;": '\U000022BB', + "veeeq;": '\U0000225A', + "vellip;": '\U000022EE', + "verbar;": '\U0000007C', + "vert;": '\U0000007C', + "vfr;": '\U0001D533', + "vltri;": '\U000022B2', + "vopf;": '\U0001D567', + "vprop;": '\U0000221D', + "vrtri;": '\U000022B3', + "vscr;": '\U0001D4CB', + "vzigzag;": '\U0000299A', + "wcirc;": '\U00000175', + "wedbar;": '\U00002A5F', + "wedge;": '\U00002227', + "wedgeq;": '\U00002259', + "weierp;": '\U00002118', + "wfr;": '\U0001D534', + "wopf;": '\U0001D568', + "wp;": '\U00002118', + "wr;": '\U00002240', + "wreath;": '\U00002240', + "wscr;": '\U0001D4CC', + "xcap;": '\U000022C2', + "xcirc;": '\U000025EF', + "xcup;": '\U000022C3', + "xdtri;": '\U000025BD', + "xfr;": '\U0001D535', + "xhArr;": '\U000027FA', + "xharr;": '\U000027F7', + "xi;": '\U000003BE', + "xlArr;": '\U000027F8', + "xlarr;": '\U000027F5', + "xmap;": '\U000027FC', + "xnis;": '\U000022FB', + "xodot;": '\U00002A00', + "xopf;": '\U0001D569', + "xoplus;": '\U00002A01', + "xotime;": '\U00002A02', + "xrArr;": '\U000027F9', + "xrarr;": '\U000027F6', + "xscr;": '\U0001D4CD', + "xsqcup;": '\U00002A06', + "xuplus;": '\U00002A04', + "xutri;": '\U000025B3', + "xvee;": '\U000022C1', + "xwedge;": '\U000022C0', + "yacute;": '\U000000FD', + "yacy;": '\U0000044F', + "ycirc;": '\U00000177', + "ycy;": '\U0000044B', + "yen;": '\U000000A5', + "yfr;": '\U0001D536', + "yicy;": '\U00000457', + "yopf;": '\U0001D56A', + "yscr;": '\U0001D4CE', + "yucy;": '\U0000044E', + "yuml;": '\U000000FF', + "zacute;": '\U0000017A', + "zcaron;": '\U0000017E', + "zcy;": '\U00000437', + "zdot;": '\U0000017C', + "zeetrf;": '\U00002128', + "zeta;": '\U000003B6', + "zfr;": '\U0001D537', + "zhcy;": '\U00000436', + "zigrarr;": '\U000021DD', + "zopf;": '\U0001D56B', + "zscr;": '\U0001D4CF', + "zwj;": '\U0000200D', + "zwnj;": '\U0000200C', + "AElig": '\U000000C6', + "AMP": '\U00000026', + "Aacute": '\U000000C1', + "Acirc": '\U000000C2', + "Agrave": '\U000000C0', + "Aring": '\U000000C5', + "Atilde": '\U000000C3', + "Auml": '\U000000C4', + "COPY": '\U000000A9', + "Ccedil": '\U000000C7', + "ETH": '\U000000D0', + "Eacute": '\U000000C9', + "Ecirc": '\U000000CA', + "Egrave": '\U000000C8', + "Euml": '\U000000CB', + "GT": '\U0000003E', + "Iacute": '\U000000CD', + "Icirc": '\U000000CE', + "Igrave": '\U000000CC', + "Iuml": '\U000000CF', + "LT": '\U0000003C', + "Ntilde": '\U000000D1', + "Oacute": '\U000000D3', + "Ocirc": '\U000000D4', + "Ograve": '\U000000D2', + "Oslash": '\U000000D8', + "Otilde": '\U000000D5', + "Ouml": '\U000000D6', + "QUOT": '\U00000022', + 
"REG": '\U000000AE', + "THORN": '\U000000DE', + "Uacute": '\U000000DA', + "Ucirc": '\U000000DB', + "Ugrave": '\U000000D9', + "Uuml": '\U000000DC', + "Yacute": '\U000000DD', + "aacute": '\U000000E1', + "acirc": '\U000000E2', + "acute": '\U000000B4', + "aelig": '\U000000E6', + "agrave": '\U000000E0', + "amp": '\U00000026', + "aring": '\U000000E5', + "atilde": '\U000000E3', + "auml": '\U000000E4', + "brvbar": '\U000000A6', + "ccedil": '\U000000E7', + "cedil": '\U000000B8', + "cent": '\U000000A2', + "copy": '\U000000A9', + "curren": '\U000000A4', + "deg": '\U000000B0', + "divide": '\U000000F7', + "eacute": '\U000000E9', + "ecirc": '\U000000EA', + "egrave": '\U000000E8', + "eth": '\U000000F0', + "euml": '\U000000EB', + "frac12": '\U000000BD', + "frac14": '\U000000BC', + "frac34": '\U000000BE', + "gt": '\U0000003E', + "iacute": '\U000000ED', + "icirc": '\U000000EE', + "iexcl": '\U000000A1', + "igrave": '\U000000EC', + "iquest": '\U000000BF', + "iuml": '\U000000EF', + "laquo": '\U000000AB', + "lt": '\U0000003C', + "macr": '\U000000AF', + "micro": '\U000000B5', + "middot": '\U000000B7', + "nbsp": '\U000000A0', + "not": '\U000000AC', + "ntilde": '\U000000F1', + "oacute": '\U000000F3', + "ocirc": '\U000000F4', + "ograve": '\U000000F2', + "ordf": '\U000000AA', + "ordm": '\U000000BA', + "oslash": '\U000000F8', + "otilde": '\U000000F5', + "ouml": '\U000000F6', + "para": '\U000000B6', + "plusmn": '\U000000B1', + "pound": '\U000000A3', + "quot": '\U00000022', + "raquo": '\U000000BB', + "reg": '\U000000AE', + "sect": '\U000000A7', + "shy": '\U000000AD', + "sup1": '\U000000B9', + "sup2": '\U000000B2', + "sup3": '\U000000B3', + "szlig": '\U000000DF', + "thorn": '\U000000FE', + "times": '\U000000D7', + "uacute": '\U000000FA', + "ucirc": '\U000000FB', + "ugrave": '\U000000F9', + "uml": '\U000000A8', + "uuml": '\U000000FC', + "yacute": '\U000000FD', + "yen": '\U000000A5', + "yuml": '\U000000FF', +} + +// HTML entities that are two unicode codepoints. +var entity2 = map[string][2]rune{ + // TODO(nigeltao): Handle replacements that are wider than their names. 
+ // "nLt;": {'\u226A', '\u20D2'}, + // "nGt;": {'\u226B', '\u20D2'}, + "NotEqualTilde;": {'\u2242', '\u0338'}, + "NotGreaterFullEqual;": {'\u2267', '\u0338'}, + "NotGreaterGreater;": {'\u226B', '\u0338'}, + "NotGreaterSlantEqual;": {'\u2A7E', '\u0338'}, + "NotHumpDownHump;": {'\u224E', '\u0338'}, + "NotHumpEqual;": {'\u224F', '\u0338'}, + "NotLeftTriangleBar;": {'\u29CF', '\u0338'}, + "NotLessLess;": {'\u226A', '\u0338'}, + "NotLessSlantEqual;": {'\u2A7D', '\u0338'}, + "NotNestedGreaterGreater;": {'\u2AA2', '\u0338'}, + "NotNestedLessLess;": {'\u2AA1', '\u0338'}, + "NotPrecedesEqual;": {'\u2AAF', '\u0338'}, + "NotRightTriangleBar;": {'\u29D0', '\u0338'}, + "NotSquareSubset;": {'\u228F', '\u0338'}, + "NotSquareSuperset;": {'\u2290', '\u0338'}, + "NotSubset;": {'\u2282', '\u20D2'}, + "NotSucceedsEqual;": {'\u2AB0', '\u0338'}, + "NotSucceedsTilde;": {'\u227F', '\u0338'}, + "NotSuperset;": {'\u2283', '\u20D2'}, + "ThickSpace;": {'\u205F', '\u200A'}, + "acE;": {'\u223E', '\u0333'}, + "bne;": {'\u003D', '\u20E5'}, + "bnequiv;": {'\u2261', '\u20E5'}, + "caps;": {'\u2229', '\uFE00'}, + "cups;": {'\u222A', '\uFE00'}, + "fjlig;": {'\u0066', '\u006A'}, + "gesl;": {'\u22DB', '\uFE00'}, + "gvertneqq;": {'\u2269', '\uFE00'}, + "gvnE;": {'\u2269', '\uFE00'}, + "lates;": {'\u2AAD', '\uFE00'}, + "lesg;": {'\u22DA', '\uFE00'}, + "lvertneqq;": {'\u2268', '\uFE00'}, + "lvnE;": {'\u2268', '\uFE00'}, + "nGg;": {'\u22D9', '\u0338'}, + "nGtv;": {'\u226B', '\u0338'}, + "nLl;": {'\u22D8', '\u0338'}, + "nLtv;": {'\u226A', '\u0338'}, + "nang;": {'\u2220', '\u20D2'}, + "napE;": {'\u2A70', '\u0338'}, + "napid;": {'\u224B', '\u0338'}, + "nbump;": {'\u224E', '\u0338'}, + "nbumpe;": {'\u224F', '\u0338'}, + "ncongdot;": {'\u2A6D', '\u0338'}, + "nedot;": {'\u2250', '\u0338'}, + "nesim;": {'\u2242', '\u0338'}, + "ngE;": {'\u2267', '\u0338'}, + "ngeqq;": {'\u2267', '\u0338'}, + "ngeqslant;": {'\u2A7E', '\u0338'}, + "nges;": {'\u2A7E', '\u0338'}, + "nlE;": {'\u2266', '\u0338'}, + "nleqq;": {'\u2266', '\u0338'}, + "nleqslant;": {'\u2A7D', '\u0338'}, + "nles;": {'\u2A7D', '\u0338'}, + "notinE;": {'\u22F9', '\u0338'}, + "notindot;": {'\u22F5', '\u0338'}, + "nparsl;": {'\u2AFD', '\u20E5'}, + "npart;": {'\u2202', '\u0338'}, + "npre;": {'\u2AAF', '\u0338'}, + "npreceq;": {'\u2AAF', '\u0338'}, + "nrarrc;": {'\u2933', '\u0338'}, + "nrarrw;": {'\u219D', '\u0338'}, + "nsce;": {'\u2AB0', '\u0338'}, + "nsubE;": {'\u2AC5', '\u0338'}, + "nsubset;": {'\u2282', '\u20D2'}, + "nsubseteqq;": {'\u2AC5', '\u0338'}, + "nsucceq;": {'\u2AB0', '\u0338'}, + "nsupE;": {'\u2AC6', '\u0338'}, + "nsupset;": {'\u2283', '\u20D2'}, + "nsupseteqq;": {'\u2AC6', '\u0338'}, + "nvap;": {'\u224D', '\u20D2'}, + "nvge;": {'\u2265', '\u20D2'}, + "nvgt;": {'\u003E', '\u20D2'}, + "nvle;": {'\u2264', '\u20D2'}, + "nvlt;": {'\u003C', '\u20D2'}, + "nvltrie;": {'\u22B4', '\u20D2'}, + "nvrtrie;": {'\u22B5', '\u20D2'}, + "nvsim;": {'\u223C', '\u20D2'}, + "race;": {'\u223D', '\u0331'}, + "smtes;": {'\u2AAC', '\uFE00'}, + "sqcaps;": {'\u2293', '\uFE00'}, + "sqcups;": {'\u2294', '\uFE00'}, + "varsubsetneq;": {'\u228A', '\uFE00'}, + "varsubsetneqq;": {'\u2ACB', '\uFE00'}, + "varsupsetneq;": {'\u228B', '\uFE00'}, + "varsupsetneqq;": {'\u2ACC', '\uFE00'}, + "vnsub;": {'\u2282', '\u20D2'}, + "vnsup;": {'\u2283', '\u20D2'}, + "vsubnE;": {'\u2ACB', '\uFE00'}, + "vsubne;": {'\u228A', '\uFE00'}, + "vsupnE;": {'\u2ACC', '\uFE00'}, + "vsupne;": {'\u228B', '\uFE00'}, +} diff --git a/vendor/golang.org/x/net/html/escape.go b/vendor/golang.org/x/net/html/escape.go new file mode 100644 index 
0000000000..04c6bec210 --- /dev/null +++ b/vendor/golang.org/x/net/html/escape.go @@ -0,0 +1,339 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "bytes" + "strings" + "unicode/utf8" +) + +// These replacements permit compatibility with old numeric entities that +// assumed Windows-1252 encoding. +// https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference +var replacementTable = [...]rune{ + '\u20AC', // First entry is what 0x80 should be replaced with. + '\u0081', + '\u201A', + '\u0192', + '\u201E', + '\u2026', + '\u2020', + '\u2021', + '\u02C6', + '\u2030', + '\u0160', + '\u2039', + '\u0152', + '\u008D', + '\u017D', + '\u008F', + '\u0090', + '\u2018', + '\u2019', + '\u201C', + '\u201D', + '\u2022', + '\u2013', + '\u2014', + '\u02DC', + '\u2122', + '\u0161', + '\u203A', + '\u0153', + '\u009D', + '\u017E', + '\u0178', // Last entry is 0x9F. + // 0x00->'\uFFFD' is handled programmatically. + // 0x0D->'\u000D' is a no-op. +} + +// unescapeEntity reads an entity like "<" from b[src:] and writes the +// corresponding "<" to b[dst:], returning the incremented dst and src cursors. +// Precondition: b[src] == '&' && dst <= src. +// attribute should be true if parsing an attribute value. +func unescapeEntity(b []byte, dst, src int, attribute bool) (dst1, src1 int) { + // https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference + + // i starts at 1 because we already know that s[0] == '&'. + i, s := 1, b[src:] + + if len(s) <= 1 { + b[dst] = b[src] + return dst + 1, src + 1 + } + + if s[i] == '#' { + if len(s) <= 3 { // We need to have at least "&#.". + b[dst] = b[src] + return dst + 1, src + 1 + } + i++ + c := s[i] + hex := false + if c == 'x' || c == 'X' { + hex = true + i++ + } + + x := '\x00' + for i < len(s) { + c = s[i] + i++ + if hex { + if '0' <= c && c <= '9' { + x = 16*x + rune(c) - '0' + continue + } else if 'a' <= c && c <= 'f' { + x = 16*x + rune(c) - 'a' + 10 + continue + } else if 'A' <= c && c <= 'F' { + x = 16*x + rune(c) - 'A' + 10 + continue + } + } else if '0' <= c && c <= '9' { + x = 10*x + rune(c) - '0' + continue + } + if c != ';' { + i-- + } + break + } + + if i <= 3 { // No characters matched. + b[dst] = b[src] + return dst + 1, src + 1 + } + + if 0x80 <= x && x <= 0x9F { + // Replace characters from Windows-1252 with UTF-8 equivalents. + x = replacementTable[x-0x80] + } else if x == 0 || (0xD800 <= x && x <= 0xDFFF) || x > 0x10FFFF { + // Replace invalid characters with the replacement character. + x = '\uFFFD' + } + + return dst + utf8.EncodeRune(b[dst:], x), src + i + } + + // Consume the maximum number of characters possible, with the + // consumed characters matching one of the named references. + + for i < len(s) { + c := s[i] + i++ + // Lower-cased characters are more common in entities, so we check for them first. + if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { + continue + } + if c != ';' { + i-- + } + break + } + + entityName := string(s[1:i]) + if entityName == "" { + // No-op. + } else if attribute && entityName[len(entityName)-1] != ';' && len(s) > i && s[i] == '=' { + // No-op. 
+ } else if x := entity[entityName]; x != 0 { + return dst + utf8.EncodeRune(b[dst:], x), src + i + } else if x := entity2[entityName]; x[0] != 0 { + dst1 := dst + utf8.EncodeRune(b[dst:], x[0]) + return dst1 + utf8.EncodeRune(b[dst1:], x[1]), src + i + } else if !attribute { + maxLen := len(entityName) - 1 + if maxLen > longestEntityWithoutSemicolon { + maxLen = longestEntityWithoutSemicolon + } + for j := maxLen; j > 1; j-- { + if x := entity[entityName[:j]]; x != 0 { + return dst + utf8.EncodeRune(b[dst:], x), src + j + 1 + } + } + } + + dst1, src1 = dst+i, src+i + copy(b[dst:dst1], b[src:src1]) + return dst1, src1 +} + +// unescape unescapes b's entities in-place, so that "a<b" becomes "a' byte that, per above, we'd like to avoid escaping unless we have to. +// +// Studying the summary table (and T actions in its '>' column) closely, we +// only need to escape in states 43, 44, 49, 51 and 52. State 43 is at the +// start of the comment data. State 52 is after a '!'. The other three states +// are after a '-'. +// +// Our algorithm is thus to escape every '&' and to escape '>' if and only if: +// - The '>' is after a '!' or '-' (in the unescaped data) or +// - The '>' is at the start of the comment data (after the opening ""); err != nil { + return err + } + return nil + case DoctypeNode: + if _, err := w.WriteString("') + case RawNode: + _, err := w.WriteString(n.Data) + return err + default: + return errors.New("html: unknown node type") + } + + // Render the opening tag. + if err := w.WriteByte('<'); err != nil { + return err + } + if _, err := w.WriteString(n.Data); err != nil { + return err + } + for _, a := range n.Attr { + if err := w.WriteByte(' '); err != nil { + return err + } + if a.Namespace != "" { + if _, err := w.WriteString(a.Namespace); err != nil { + return err + } + if err := w.WriteByte(':'); err != nil { + return err + } + } + if _, err := w.WriteString(a.Key); err != nil { + return err + } + if _, err := w.WriteString(`="`); err != nil { + return err + } + if err := escape(w, a.Val); err != nil { + return err + } + if err := w.WriteByte('"'); err != nil { + return err + } + } + if voidElements[n.Data] { + if n.FirstChild != nil { + return fmt.Errorf("html: void element <%s> has child nodes", n.Data) + } + _, err := w.WriteString("/>") + return err + } + if err := w.WriteByte('>'); err != nil { + return err + } + + // Add initial newline where there is danger of a newline beging ignored. + if c := n.FirstChild; c != nil && c.Type == TextNode && strings.HasPrefix(c.Data, "\n") { + switch n.Data { + case "pre", "listing", "textarea": + if err := w.WriteByte('\n'); err != nil { + return err + } + } + } + + // Render any child nodes + if childTextNodesAreLiteral(n) { + for c := n.FirstChild; c != nil; c = c.NextSibling { + if c.Type == TextNode { + if _, err := w.WriteString(c.Data); err != nil { + return err + } + } else { + if err := render1(w, c); err != nil { + return err + } + } + } + if n.Data == "plaintext" { + // Don't render anything else. must be the + // last element in the file, with no closing tag. + return plaintextAbort + } + } else { + for c := n.FirstChild; c != nil; c = c.NextSibling { + if err := render1(w, c); err != nil { + return err + } + } + } + + // Render the </xxx> closing tag. 
+ if _, err := w.WriteString("</"); err != nil { + return err + } + if _, err := w.WriteString(n.Data); err != nil { + return err + } + return w.WriteByte('>') +} + +func childTextNodesAreLiteral(n *Node) bool { + // Per WHATWG HTML 13.3, if the parent of the current node is a style, + // script, xmp, iframe, noembed, noframes, or plaintext element, and the + // current node is a text node, append the value of the node's data + // literally. The specification is not explicit about it, but we only + // enforce this if we are in the HTML namespace (i.e. when the namespace is + // ""). + // NOTE: we also always include noscript elements, although the + // specification states that they should only be rendered as such if + // scripting is enabled for the node (which is not something we track). + if n.Namespace != "" { + return false + } + switch n.Data { + case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "xmp": + return true + default: + return false + } +} + +// writeQuoted writes s to w surrounded by quotes. Normally it will use double +// quotes, but if s contains a double quote, it will use single quotes. +// It is used for writing the identifiers in a doctype declaration. +// In valid HTML, they can't contain both types of quotes. +func writeQuoted(w writer, s string) error { + var q byte = '"' + if strings.Contains(s, `"`) { + q = '\'' + } + if err := w.WriteByte(q); err != nil { + return err + } + if _, err := w.WriteString(s); err != nil { + return err + } + if err := w.WriteByte(q); err != nil { + return err + } + return nil +} + +// Section 12.1.2, "Elements", gives this list of void elements. Void elements +// are those that can't have any contents. +var voidElements = map[string]bool{ + "area": true, + "base": true, + "br": true, + "col": true, + "embed": true, + "hr": true, + "img": true, + "input": true, + "keygen": true, // "keygen" has been removed from the spec, but are kept here for backwards compatibility. + "link": true, + "meta": true, + "param": true, + "source": true, + "track": true, + "wbr": true, +} diff --git a/vendor/golang.org/x/net/html/token.go b/vendor/golang.org/x/net/html/token.go new file mode 100644 index 0000000000..de67f938a1 --- /dev/null +++ b/vendor/golang.org/x/net/html/token.go @@ -0,0 +1,1268 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "bytes" + "errors" + "io" + "strconv" + "strings" + + "golang.org/x/net/html/atom" +) + +// A TokenType is the type of a Token. +type TokenType uint32 + +const ( + // ErrorToken means that an error occurred during tokenization. + ErrorToken TokenType = iota + // TextToken means a text node. + TextToken + // A StartTagToken looks like <a>. + StartTagToken + // An EndTagToken looks like </a>. + EndTagToken + // A SelfClosingTagToken tag looks like <br/>. + SelfClosingTagToken + // A CommentToken looks like <!--x-->. + CommentToken + // A DoctypeToken looks like <!DOCTYPE x> + DoctypeToken +) + +// ErrBufferExceeded means that the buffering limit was exceeded. +var ErrBufferExceeded = errors.New("max buffer exceeded") + +// String returns a string representation of the TokenType. 
+func (t TokenType) String() string { + switch t { + case ErrorToken: + return "Error" + case TextToken: + return "Text" + case StartTagToken: + return "StartTag" + case EndTagToken: + return "EndTag" + case SelfClosingTagToken: + return "SelfClosingTag" + case CommentToken: + return "Comment" + case DoctypeToken: + return "Doctype" + } + return "Invalid(" + strconv.Itoa(int(t)) + ")" +} + +// An Attribute is an attribute namespace-key-value triple. Namespace is +// non-empty for foreign attributes like xlink, Key is alphabetic (and hence +// does not contain escapable characters like '&', '<' or '>'), and Val is +// unescaped (it looks like "a<b" rather than "a&lt;b"). +// +// Namespace is only used by the parser, not the tokenizer. +type Attribute struct { + Namespace, Key, Val string +} + +// A Token consists of a TokenType and some Data (tag name for start and end +// tags, content for text, comments and doctypes). A tag Token may also contain +// a slice of Attributes. Data is unescaped for all Tokens (it looks like "a<b" +// rather than "a&lt;b"). For tag Tokens, DataAtom is the atom for Data, or +// zero if Data is not a known tag name. +type Token struct { + Type TokenType + DataAtom atom.Atom + Data string + Attr []Attribute +} + +// tagString returns a string representation of a tag Token's Data and Attr. +func (t Token) tagString() string { + if len(t.Attr) == 0 { + return t.Data + } + buf := bytes.NewBufferString(t.Data) + for _, a := range t.Attr { + buf.WriteByte(' ') + buf.WriteString(a.Key) + buf.WriteString(`="`) + escape(buf, a.Val) + buf.WriteByte('"') + } + return buf.String() +} + +// String returns a string representation of the Token. +func (t Token) String() string { + switch t.Type { + case ErrorToken: + return "" + case TextToken: + return EscapeString(t.Data) + case StartTagToken: + return "<" + t.tagString() + ">" + case EndTagToken: + return "</" + t.tagString() + ">" + case SelfClosingTagToken: + return "<" + t.tagString() + "/>" + case CommentToken: + return "<!--" + escapeCommentString(t.Data) + "-->" + case DoctypeToken: + return "<!DOCTYPE " + EscapeString(t.Data) + ">" + } + return "Invalid(" + strconv.Itoa(int(t.Type)) + ")" +} + +// span is a range of bytes in a Tokenizer's buffer. The start is inclusive, +// the end is exclusive. +type span struct { + start, end int +} + +// A Tokenizer returns a stream of HTML Tokens. +type Tokenizer struct { + // r is the source of the HTML text. + r io.Reader + // tt is the TokenType of the current token. + tt TokenType + // err is the first error encountered during tokenization. It is possible + // for tt != Error && err != nil to hold: this means that Next returned a + // valid token but the subsequent Next call will return an error token. + // For example, if the HTML text input was just "plain", then the first + // Next call would set z.err to io.EOF but return a TextToken, and all + // subsequent Next calls would return an ErrorToken. + // err is never reset. Once it becomes non-nil, it stays non-nil. + err error + // readErr is the error returned by the io.Reader r. It is separate from + // err because it is valid for an io.Reader to return (n int, err1 error) + // such that n > 0 && err1 != nil, and callers should always process the + // n > 0 bytes before considering the error err1. + readErr error + // buf[raw.start:raw.end] holds the raw bytes of the current token. + // buf[raw.end:] is buffered input that will yield future tokens. + raw span + buf []byte + // maxBuf limits the data buffered in buf. 
A value of 0 means unlimited. + maxBuf int + // buf[data.start:data.end] holds the raw bytes of the current token's data: + // a text token's text, a tag token's tag name, etc. + data span + // pendingAttr is the attribute key and value currently being tokenized. + // When complete, pendingAttr is pushed onto attr. nAttrReturned is + // incremented on each call to TagAttr. + pendingAttr [2]span + attr [][2]span + nAttrReturned int + // rawTag is the "script" in "</script>" that closes the next token. If + // non-empty, the subsequent call to Next will return a raw or RCDATA text + // token: one that treats "<p>" as text instead of an element. + // rawTag's contents are lower-cased. + rawTag string + // textIsRaw is whether the current text token's data is not escaped. + textIsRaw bool + // convertNUL is whether NUL bytes in the current token's data should + // be converted into \ufffd replacement characters. + convertNUL bool + // allowCDATA is whether CDATA sections are allowed in the current context. + allowCDATA bool +} + +// AllowCDATA sets whether or not the tokenizer recognizes <![CDATA[foo]]> as +// the text "foo". The default value is false, which means to recognize it as +// a bogus comment "<!-- [CDATA[foo]] -->" instead. +// +// Strictly speaking, an HTML5 compliant tokenizer should allow CDATA if and +// only if tokenizing foreign content, such as MathML and SVG. However, +// tracking foreign-contentness is difficult to do purely in the tokenizer, +// as opposed to the parser, due to HTML integration points: an <svg> element +// can contain a <foreignObject> that is foreign-to-SVG but not foreign-to- +// HTML. For strict compliance with the HTML5 tokenization algorithm, it is the +// responsibility of the user of a tokenizer to call AllowCDATA as appropriate. +// In practice, if using the tokenizer without caring whether MathML or SVG +// CDATA is text or comments, such as tokenizing HTML to find all the anchor +// text, it is acceptable to ignore this responsibility. +func (z *Tokenizer) AllowCDATA(allowCDATA bool) { + z.allowCDATA = allowCDATA +} + +// NextIsNotRawText instructs the tokenizer that the next token should not be +// considered as 'raw text'. Some elements, such as script and title elements, +// normally require the next token after the opening tag to be 'raw text' that +// has no child elements. For example, tokenizing "<title>a<b>c</b>d</title>" +// yields a start tag token for "<title>", a text token for "a<b>c</b>d", and +// an end tag token for "</title>". There are no distinct start tag or end tag +// tokens for the "<b>" and "</b>". +// +// This tokenizer implementation will generally look for raw text at the right +// times. Strictly speaking, an HTML5 compliant tokenizer should not look for +// raw text if in foreign content: <title> generally needs raw text, but a +// <title> inside an <svg> does not. Another example is that a <textarea> +// generally needs raw text, but a <textarea> is not allowed as an immediate +// child of a <select>; in normal parsing, a <textarea> implies </select>, but +// one cannot close the implicit element when parsing a <select>'s InnerHTML. +// Similarly to AllowCDATA, tracking the correct moment to override raw-text- +// ness is difficult to do purely in the tokenizer, as opposed to the parser. +// For strict compliance with the HTML5 tokenization algorithm, it is the +// responsibility of the user of a tokenizer to call NextIsNotRawText as +// appropriate. 
In practice, like AllowCDATA, it is acceptable to ignore this +// responsibility for basic usage. +// +// Note that this 'raw text' concept is different from the one offered by the +// Tokenizer.Raw method. +func (z *Tokenizer) NextIsNotRawText() { + z.rawTag = "" +} + +// Err returns the error associated with the most recent ErrorToken token. +// This is typically io.EOF, meaning the end of tokenization. +func (z *Tokenizer) Err() error { + if z.tt != ErrorToken { + return nil + } + return z.err +} + +// readByte returns the next byte from the input stream, doing a buffered read +// from z.r into z.buf if necessary. z.buf[z.raw.start:z.raw.end] remains a contiguous byte +// slice that holds all the bytes read so far for the current token. +// It sets z.err if the underlying reader returns an error. +// Pre-condition: z.err == nil. +func (z *Tokenizer) readByte() byte { + if z.raw.end >= len(z.buf) { + // Our buffer is exhausted and we have to read from z.r. Check if the + // previous read resulted in an error. + if z.readErr != nil { + z.err = z.readErr + return 0 + } + // We copy z.buf[z.raw.start:z.raw.end] to the beginning of z.buf. If the length + // z.raw.end - z.raw.start is more than half the capacity of z.buf, then we + // allocate a new buffer before the copy. + c := cap(z.buf) + d := z.raw.end - z.raw.start + var buf1 []byte + if 2*d > c { + buf1 = make([]byte, d, 2*c) + } else { + buf1 = z.buf[:d] + } + copy(buf1, z.buf[z.raw.start:z.raw.end]) + if x := z.raw.start; x != 0 { + // Adjust the data/attr spans to refer to the same contents after the copy. + z.data.start -= x + z.data.end -= x + z.pendingAttr[0].start -= x + z.pendingAttr[0].end -= x + z.pendingAttr[1].start -= x + z.pendingAttr[1].end -= x + for i := range z.attr { + z.attr[i][0].start -= x + z.attr[i][0].end -= x + z.attr[i][1].start -= x + z.attr[i][1].end -= x + } + } + z.raw.start, z.raw.end, z.buf = 0, d, buf1[:d] + // Now that we have copied the live bytes to the start of the buffer, + // we read from z.r into the remainder. + var n int + n, z.readErr = readAtLeastOneByte(z.r, buf1[d:cap(buf1)]) + if n == 0 { + z.err = z.readErr + return 0 + } + z.buf = buf1[:d+n] + } + x := z.buf[z.raw.end] + z.raw.end++ + if z.maxBuf > 0 && z.raw.end-z.raw.start >= z.maxBuf { + z.err = ErrBufferExceeded + return 0 + } + return x +} + +// Buffered returns a slice containing data buffered but not yet tokenized. +func (z *Tokenizer) Buffered() []byte { + return z.buf[z.raw.end:] +} + +// readAtLeastOneByte wraps an io.Reader so that reading cannot return (0, nil). +// It returns io.ErrNoProgress if the underlying r.Read method returns (0, nil) +// too many times in succession. +func readAtLeastOneByte(r io.Reader, b []byte) (int, error) { + for i := 0; i < 100; i++ { + if n, err := r.Read(b); n != 0 || err != nil { + return n, err + } + } + return 0, io.ErrNoProgress +} + +// skipWhiteSpace skips past any white space. +func (z *Tokenizer) skipWhiteSpace() { + if z.err != nil { + return + } + for { + c := z.readByte() + if z.err != nil { + return + } + switch c { + case ' ', '\n', '\r', '\t', '\f': + // No-op. + default: + z.raw.end-- + return + } + } +} + +// readRawOrRCDATA reads until the next "</foo>", where "foo" is z.rawTag and +// is typically something like "script" or "textarea". 
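This raw-text behaviour is visible through the public API: for an element such as <script>, everything up to the matching end tag comes back as a single text token with its bytes untouched, while <textarea> and <title> (RCDATA) additionally get entity decoding. A small sketch under those assumptions:

package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	z := html.NewTokenizer(strings.NewReader(`<script>if (a < b) { f("</p>"); }</script>`))
	for {
		tt := z.Next()
		if tt == html.ErrorToken {
			return
		}
		// The script body arrives as one TextToken: the "<" inside it is not
		// treated as the start of a tag, and the "</p>" inside the string
		// literal does not end the script element.
		fmt.Printf("%v %q\n", tt, z.Token().Data)
	}
}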
+func (z *Tokenizer) readRawOrRCDATA() { + if z.rawTag == "script" { + z.readScript() + z.textIsRaw = true + z.rawTag = "" + return + } +loop: + for { + c := z.readByte() + if z.err != nil { + break loop + } + if c != '<' { + continue loop + } + c = z.readByte() + if z.err != nil { + break loop + } + if c != '/' { + z.raw.end-- + continue loop + } + if z.readRawEndTag() || z.err != nil { + break loop + } + } + z.data.end = z.raw.end + // A textarea's or title's RCDATA can contain escaped entities. + z.textIsRaw = z.rawTag != "textarea" && z.rawTag != "title" + z.rawTag = "" +} + +// readRawEndTag attempts to read a tag like "</foo>", where "foo" is z.rawTag. +// If it succeeds, it backs up the input position to reconsume the tag and +// returns true. Otherwise it returns false. The opening "</" has already been +// consumed. +func (z *Tokenizer) readRawEndTag() bool { + for i := 0; i < len(z.rawTag); i++ { + c := z.readByte() + if z.err != nil { + return false + } + if c != z.rawTag[i] && c != z.rawTag[i]-('a'-'A') { + z.raw.end-- + return false + } + } + c := z.readByte() + if z.err != nil { + return false + } + switch c { + case ' ', '\n', '\r', '\t', '\f', '/', '>': + // The 3 is 2 for the leading "</" plus 1 for the trailing character c. + z.raw.end -= 3 + len(z.rawTag) + return true + } + z.raw.end-- + return false +} + +// readScript reads until the next </script> tag, following the byzantine +// rules for escaping/hiding the closing tag. +func (z *Tokenizer) readScript() { + defer func() { + z.data.end = z.raw.end + }() + var c byte + +scriptData: + c = z.readByte() + if z.err != nil { + return + } + if c == '<' { + goto scriptDataLessThanSign + } + goto scriptData + +scriptDataLessThanSign: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '/': + goto scriptDataEndTagOpen + case '!': + goto scriptDataEscapeStart + } + z.raw.end-- + goto scriptData + +scriptDataEndTagOpen: + if z.readRawEndTag() || z.err != nil { + return + } + goto scriptData + +scriptDataEscapeStart: + c = z.readByte() + if z.err != nil { + return + } + if c == '-' { + goto scriptDataEscapeStartDash + } + z.raw.end-- + goto scriptData + +scriptDataEscapeStartDash: + c = z.readByte() + if z.err != nil { + return + } + if c == '-' { + goto scriptDataEscapedDashDash + } + z.raw.end-- + goto scriptData + +scriptDataEscaped: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '-': + goto scriptDataEscapedDash + case '<': + goto scriptDataEscapedLessThanSign + } + goto scriptDataEscaped + +scriptDataEscapedDash: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '-': + goto scriptDataEscapedDashDash + case '<': + goto scriptDataEscapedLessThanSign + } + goto scriptDataEscaped + +scriptDataEscapedDashDash: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '-': + goto scriptDataEscapedDashDash + case '<': + goto scriptDataEscapedLessThanSign + case '>': + goto scriptData + } + goto scriptDataEscaped + +scriptDataEscapedLessThanSign: + c = z.readByte() + if z.err != nil { + return + } + if c == '/' { + goto scriptDataEscapedEndTagOpen + } + if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' { + goto scriptDataDoubleEscapeStart + } + z.raw.end-- + goto scriptData + +scriptDataEscapedEndTagOpen: + if z.readRawEndTag() || z.err != nil { + return + } + goto scriptDataEscaped + +scriptDataDoubleEscapeStart: + z.raw.end-- + for i := 0; i < len("script"); i++ { + c = z.readByte() + if z.err != nil { + return + } + if c != "script"[i] && c != 
"SCRIPT"[i] { + z.raw.end-- + goto scriptDataEscaped + } + } + c = z.readByte() + if z.err != nil { + return + } + switch c { + case ' ', '\n', '\r', '\t', '\f', '/', '>': + goto scriptDataDoubleEscaped + } + z.raw.end-- + goto scriptDataEscaped + +scriptDataDoubleEscaped: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '-': + goto scriptDataDoubleEscapedDash + case '<': + goto scriptDataDoubleEscapedLessThanSign + } + goto scriptDataDoubleEscaped + +scriptDataDoubleEscapedDash: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '-': + goto scriptDataDoubleEscapedDashDash + case '<': + goto scriptDataDoubleEscapedLessThanSign + } + goto scriptDataDoubleEscaped + +scriptDataDoubleEscapedDashDash: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '-': + goto scriptDataDoubleEscapedDashDash + case '<': + goto scriptDataDoubleEscapedLessThanSign + case '>': + goto scriptData + } + goto scriptDataDoubleEscaped + +scriptDataDoubleEscapedLessThanSign: + c = z.readByte() + if z.err != nil { + return + } + if c == '/' { + goto scriptDataDoubleEscapeEnd + } + z.raw.end-- + goto scriptDataDoubleEscaped + +scriptDataDoubleEscapeEnd: + if z.readRawEndTag() { + z.raw.end += len("</script>") + goto scriptDataEscaped + } + if z.err != nil { + return + } + goto scriptDataDoubleEscaped +} + +// readComment reads the next comment token starting with "<!--". The opening +// "<!--" has already been consumed. +func (z *Tokenizer) readComment() { + // When modifying this function, consider manually increasing the + // maxSuffixLen constant in func TestComments, from 6 to e.g. 9 or more. + // That increase should only be temporary, not committed, as it + // exponentially affects the test running time. + + z.data.start = z.raw.end + defer func() { + if z.data.end < z.data.start { + // It's a comment with no data, like <!-->. + z.data.end = z.data.start + } + }() + + var dashCount int + beginning := true + for { + c := z.readByte() + if z.err != nil { + z.data.end = z.calculateAbruptCommentDataEnd() + return + } + switch c { + case '-': + dashCount++ + continue + case '>': + if dashCount >= 2 || beginning { + z.data.end = z.raw.end - len("-->") + return + } + case '!': + if dashCount >= 2 { + c = z.readByte() + if z.err != nil { + z.data.end = z.calculateAbruptCommentDataEnd() + return + } else if c == '>' { + z.data.end = z.raw.end - len("--!>") + return + } else if c == '-' { + dashCount = 1 + beginning = false + continue + } + } + } + dashCount = 0 + beginning = false + } +} + +func (z *Tokenizer) calculateAbruptCommentDataEnd() int { + raw := z.Raw() + const prefixLen = len("<!--") + if len(raw) >= prefixLen { + raw = raw[prefixLen:] + if hasSuffix(raw, "--!") { + return z.raw.end - 3 + } else if hasSuffix(raw, "--") { + return z.raw.end - 2 + } else if hasSuffix(raw, "-") { + return z.raw.end - 1 + } + } + return z.raw.end +} + +func hasSuffix(b []byte, suffix string) bool { + if len(b) < len(suffix) { + return false + } + b = b[len(b)-len(suffix):] + for i := range b { + if b[i] != suffix[i] { + return false + } + } + return true +} + +// readUntilCloseAngle reads until the next ">". +func (z *Tokenizer) readUntilCloseAngle() { + z.data.start = z.raw.end + for { + c := z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return + } + if c == '>' { + z.data.end = z.raw.end - len(">") + return + } + } +} + +// readMarkupDeclaration reads the next token starting with "<!". 
It might be +// a "<!--comment-->", a "<!DOCTYPE foo>", a "<![CDATA[section]]>" or +// "<!a bogus comment". The opening "<!" has already been consumed. +func (z *Tokenizer) readMarkupDeclaration() TokenType { + z.data.start = z.raw.end + var c [2]byte + for i := 0; i < 2; i++ { + c[i] = z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return CommentToken + } + } + if c[0] == '-' && c[1] == '-' { + z.readComment() + return CommentToken + } + z.raw.end -= 2 + if z.readDoctype() { + return DoctypeToken + } + if z.allowCDATA && z.readCDATA() { + z.convertNUL = true + return TextToken + } + // It's a bogus comment. + z.readUntilCloseAngle() + return CommentToken +} + +// readDoctype attempts to read a doctype declaration and returns true if +// successful. The opening "<!" has already been consumed. +func (z *Tokenizer) readDoctype() bool { + const s = "DOCTYPE" + for i := 0; i < len(s); i++ { + c := z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return false + } + if c != s[i] && c != s[i]+('a'-'A') { + // Back up to read the fragment of "DOCTYPE" again. + z.raw.end = z.data.start + return false + } + } + if z.skipWhiteSpace(); z.err != nil { + z.data.start = z.raw.end + z.data.end = z.raw.end + return true + } + z.readUntilCloseAngle() + return true +} + +// readCDATA attempts to read a CDATA section and returns true if +// successful. The opening "<!" has already been consumed. +func (z *Tokenizer) readCDATA() bool { + const s = "[CDATA[" + for i := 0; i < len(s); i++ { + c := z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return false + } + if c != s[i] { + // Back up to read the fragment of "[CDATA[" again. + z.raw.end = z.data.start + return false + } + } + z.data.start = z.raw.end + brackets := 0 + for { + c := z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return true + } + switch c { + case ']': + brackets++ + case '>': + if brackets >= 2 { + z.data.end = z.raw.end - len("]]>") + return true + } + brackets = 0 + default: + brackets = 0 + } + } +} + +// startTagIn returns whether the start tag in z.buf[z.data.start:z.data.end] +// case-insensitively matches any element of ss. +func (z *Tokenizer) startTagIn(ss ...string) bool { +loop: + for _, s := range ss { + if z.data.end-z.data.start != len(s) { + continue loop + } + for i := 0; i < len(s); i++ { + c := z.buf[z.data.start+i] + if 'A' <= c && c <= 'Z' { + c += 'a' - 'A' + } + if c != s[i] { + continue loop + } + } + return true + } + return false +} + +// readStartTag reads the next start tag token. The opening "<a" has already +// been consumed, where 'a' means anything in [A-Za-z]. +func (z *Tokenizer) readStartTag() TokenType { + z.readTag(true) + if z.err != nil { + return ErrorToken + } + // Several tags flag the tokenizer's next token as raw. + c, raw := z.buf[z.data.start], false + if 'A' <= c && c <= 'Z' { + c += 'a' - 'A' + } + switch c { + case 'i': + raw = z.startTagIn("iframe") + case 'n': + raw = z.startTagIn("noembed", "noframes", "noscript") + case 'p': + raw = z.startTagIn("plaintext") + case 's': + raw = z.startTagIn("script", "style") + case 't': + raw = z.startTagIn("textarea", "title") + case 'x': + raw = z.startTagIn("xmp") + } + if raw { + z.rawTag = strings.ToLower(string(z.buf[z.data.start:z.data.end])) + } + // Look for a self-closing token like "<br/>". + if z.err == nil && z.buf[z.raw.end-2] == '/' { + return SelfClosingTagToken + } + return StartTagToken +} + +// readTag reads the next tag token and its attributes. 
If saveAttr, those +// attributes are saved in z.attr, otherwise z.attr is set to an empty slice. +// The opening "<a" or "</a" has already been consumed, where 'a' means anything +// in [A-Za-z]. +func (z *Tokenizer) readTag(saveAttr bool) { + z.attr = z.attr[:0] + z.nAttrReturned = 0 + // Read the tag name and attribute key/value pairs. + z.readTagName() + if z.skipWhiteSpace(); z.err != nil { + return + } + for { + c := z.readByte() + if z.err != nil || c == '>' { + break + } + z.raw.end-- + z.readTagAttrKey() + z.readTagAttrVal() + // Save pendingAttr if saveAttr and that attribute has a non-empty key. + if saveAttr && z.pendingAttr[0].start != z.pendingAttr[0].end { + z.attr = append(z.attr, z.pendingAttr) + } + if z.skipWhiteSpace(); z.err != nil { + break + } + } +} + +// readTagName sets z.data to the "div" in "<div k=v>". The reader (z.raw.end) +// is positioned such that the first byte of the tag name (the "d" in "<div") +// has already been consumed. +func (z *Tokenizer) readTagName() { + z.data.start = z.raw.end - 1 + for { + c := z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return + } + switch c { + case ' ', '\n', '\r', '\t', '\f': + z.data.end = z.raw.end - 1 + return + case '/', '>': + z.raw.end-- + z.data.end = z.raw.end + return + } + } +} + +// readTagAttrKey sets z.pendingAttr[0] to the "k" in "<div k=v>". +// Precondition: z.err == nil. +func (z *Tokenizer) readTagAttrKey() { + z.pendingAttr[0].start = z.raw.end + for { + c := z.readByte() + if z.err != nil { + z.pendingAttr[0].end = z.raw.end + return + } + switch c { + case ' ', '\n', '\r', '\t', '\f', '/': + z.pendingAttr[0].end = z.raw.end - 1 + return + case '=': + if z.pendingAttr[0].start+1 == z.raw.end { + // WHATWG 13.2.5.32, if we see an equals sign before the attribute name + // begins, we treat it as a character in the attribute name and continue. + continue + } + fallthrough + case '>': + z.raw.end-- + z.pendingAttr[0].end = z.raw.end + return + } + } +} + +// readTagAttrVal sets z.pendingAttr[1] to the "v" in "<div k=v>". +func (z *Tokenizer) readTagAttrVal() { + z.pendingAttr[1].start = z.raw.end + z.pendingAttr[1].end = z.raw.end + if z.skipWhiteSpace(); z.err != nil { + return + } + c := z.readByte() + if z.err != nil { + return + } + if c != '=' { + z.raw.end-- + return + } + if z.skipWhiteSpace(); z.err != nil { + return + } + quote := z.readByte() + if z.err != nil { + return + } + switch quote { + case '>': + z.raw.end-- + return + + case '\'', '"': + z.pendingAttr[1].start = z.raw.end + for { + c := z.readByte() + if z.err != nil { + z.pendingAttr[1].end = z.raw.end + return + } + if c == quote { + z.pendingAttr[1].end = z.raw.end - 1 + return + } + } + + default: + z.pendingAttr[1].start = z.raw.end - 1 + for { + c := z.readByte() + if z.err != nil { + z.pendingAttr[1].end = z.raw.end + return + } + switch c { + case ' ', '\n', '\r', '\t', '\f': + z.pendingAttr[1].end = z.raw.end - 1 + return + case '>': + z.raw.end-- + z.pendingAttr[1].end = z.raw.end + return + } + } + } +} + +// Next scans the next token and returns its type. +func (z *Tokenizer) Next() TokenType { + z.raw.start = z.raw.end + z.data.start = z.raw.end + z.data.end = z.raw.end + if z.err != nil { + z.tt = ErrorToken + return z.tt + } + if z.rawTag != "" { + if z.rawTag == "plaintext" { + // Read everything up to EOF. 
+ for z.err == nil { + z.readByte() + } + z.data.end = z.raw.end + z.textIsRaw = true + } else { + z.readRawOrRCDATA() + } + if z.data.end > z.data.start { + z.tt = TextToken + z.convertNUL = true + return z.tt + } + } + z.textIsRaw = false + z.convertNUL = false + +loop: + for { + c := z.readByte() + if z.err != nil { + break loop + } + if c != '<' { + continue loop + } + + // Check if the '<' we have just read is part of a tag, comment + // or doctype. If not, it's part of the accumulated text token. + c = z.readByte() + if z.err != nil { + break loop + } + var tokenType TokenType + switch { + case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z': + tokenType = StartTagToken + case c == '/': + tokenType = EndTagToken + case c == '!' || c == '?': + // We use CommentToken to mean any of "<!--actual comments-->", + // "<!DOCTYPE declarations>" and "<?xml processing instructions?>". + tokenType = CommentToken + default: + // Reconsume the current character. + z.raw.end-- + continue + } + + // We have a non-text token, but we might have accumulated some text + // before that. If so, we return the text first, and return the non- + // text token on the subsequent call to Next. + if x := z.raw.end - len("<a"); z.raw.start < x { + z.raw.end = x + z.data.end = x + z.tt = TextToken + return z.tt + } + switch tokenType { + case StartTagToken: + z.tt = z.readStartTag() + return z.tt + case EndTagToken: + c = z.readByte() + if z.err != nil { + break loop + } + if c == '>' { + // "</>" does not generate a token at all. Generate an empty comment + // to allow passthrough clients to pick up the data using Raw. + // Reset the tokenizer state and start again. + z.tt = CommentToken + return z.tt + } + if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' { + z.readTag(false) + if z.err != nil { + z.tt = ErrorToken + } else { + z.tt = EndTagToken + } + return z.tt + } + z.raw.end-- + z.readUntilCloseAngle() + z.tt = CommentToken + return z.tt + case CommentToken: + if c == '!' { + z.tt = z.readMarkupDeclaration() + return z.tt + } + z.raw.end-- + z.readUntilCloseAngle() + z.tt = CommentToken + return z.tt + } + } + if z.raw.start < z.raw.end { + z.data.end = z.raw.end + z.tt = TextToken + return z.tt + } + z.tt = ErrorToken + return z.tt +} + +// Raw returns the unmodified text of the current token. Calling Next, Token, +// Text, TagName or TagAttr may change the contents of the returned slice. +// +// The token stream's raw bytes partition the byte stream (up until an +// ErrorToken). There are no overlaps or gaps between two consecutive token's +// raw bytes. One implication is that the byte offset of the current token is +// the sum of the lengths of all previous tokens' raw bytes. +func (z *Tokenizer) Raw() []byte { + return z.buf[z.raw.start:z.raw.end] +} + +// convertNewlines converts "\r" and "\r\n" in s to "\n". +// The conversion happens in place, but the resulting slice may be shorter. +func convertNewlines(s []byte) []byte { + for i, c := range s { + if c != '\r' { + continue + } + + src := i + 1 + if src >= len(s) || s[src] != '\n' { + s[i] = '\n' + continue + } + + dst := i + for src < len(s) { + if s[src] == '\r' { + if src+1 < len(s) && s[src+1] == '\n' { + src++ + } + s[dst] = '\n' + } else { + s[dst] = s[src] + } + src++ + dst++ + } + return s[:dst] + } + return s +} + +var ( + nul = []byte("\x00") + replacement = []byte("\ufffd") +) + +// Text returns the unescaped text of a text, comment or doctype token. The +// contents of the returned slice may change on the next call to Next. 
+func (z *Tokenizer) Text() []byte { + switch z.tt { + case TextToken, CommentToken, DoctypeToken: + s := z.buf[z.data.start:z.data.end] + z.data.start = z.raw.end + z.data.end = z.raw.end + s = convertNewlines(s) + if (z.convertNUL || z.tt == CommentToken) && bytes.Contains(s, nul) { + s = bytes.Replace(s, nul, replacement, -1) + } + if !z.textIsRaw { + s = unescape(s, false) + } + return s + } + return nil +} + +// TagName returns the lower-cased name of a tag token (the `img` out of +// `<IMG SRC="foo">`) and whether the tag has attributes. +// The contents of the returned slice may change on the next call to Next. +func (z *Tokenizer) TagName() (name []byte, hasAttr bool) { + if z.data.start < z.data.end { + switch z.tt { + case StartTagToken, EndTagToken, SelfClosingTagToken: + s := z.buf[z.data.start:z.data.end] + z.data.start = z.raw.end + z.data.end = z.raw.end + return lower(s), z.nAttrReturned < len(z.attr) + } + } + return nil, false +} + +// TagAttr returns the lower-cased key and unescaped value of the next unparsed +// attribute for the current tag token and whether there are more attributes. +// The contents of the returned slices may change on the next call to Next. +func (z *Tokenizer) TagAttr() (key, val []byte, moreAttr bool) { + if z.nAttrReturned < len(z.attr) { + switch z.tt { + case StartTagToken, SelfClosingTagToken: + x := z.attr[z.nAttrReturned] + z.nAttrReturned++ + key = z.buf[x[0].start:x[0].end] + val = z.buf[x[1].start:x[1].end] + return lower(key), unescape(convertNewlines(val), true), z.nAttrReturned < len(z.attr) + } + } + return nil, nil, false +} + +// Token returns the current Token. The result's Data and Attr values remain +// valid after subsequent Next calls. +func (z *Tokenizer) Token() Token { + t := Token{Type: z.tt} + switch z.tt { + case TextToken, CommentToken, DoctypeToken: + t.Data = string(z.Text()) + case StartTagToken, SelfClosingTagToken, EndTagToken: + name, moreAttr := z.TagName() + for moreAttr { + var key, val []byte + key, val, moreAttr = z.TagAttr() + t.Attr = append(t.Attr, Attribute{"", atom.String(key), string(val)}) + } + if a := atom.Lookup(name); a != 0 { + t.DataAtom, t.Data = a, a.String() + } else { + t.DataAtom, t.Data = 0, string(name) + } + } + return t +} + +// SetMaxBuf sets a limit on the amount of data buffered during tokenization. +// A value of 0 means unlimited. +func (z *Tokenizer) SetMaxBuf(n int) { + z.maxBuf = n +} + +// NewTokenizer returns a new HTML Tokenizer for the given Reader. +// The input is assumed to be UTF-8 encoded. +func NewTokenizer(r io.Reader) *Tokenizer { + return NewTokenizerFragment(r, "") +} + +// NewTokenizerFragment returns a new HTML Tokenizer for the given Reader, for +// tokenizing an existing element's InnerHTML fragment. contextTag is that +// element's tag, such as "div" or "iframe". +// +// For example, how the InnerHTML "a<b" is tokenized depends on whether it is +// for a <p> tag or a <script> tag. +// +// The input is assumed to be UTF-8 encoded. 
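Putting these pieces together, a typical consumer drives the Tokenizer in a loop, stopping on ErrorToken and checking Err for io.EOF; SetMaxBuf can bound buffering for untrusted input. A minimal sketch:

package main

import (
	"fmt"
	"io"
	"log"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	z := html.NewTokenizer(strings.NewReader(`<p class="x">Hello &amp; goodbye</p>`))
	z.SetMaxBuf(1 << 20) // cap buffering at 1 MiB for untrusted input
	for {
		tt := z.Next()
		if tt == html.ErrorToken {
			if err := z.Err(); err != io.EOF {
				log.Fatal(err)
			}
			return
		}
		t := z.Token() // Token copies, so its Data/Attr survive later Next calls
		fmt.Println(tt, t.Data, t.Attr)
	}
}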
+func NewTokenizerFragment(r io.Reader, contextTag string) *Tokenizer { + z := &Tokenizer{ + r: r, + buf: make([]byte, 0, 4096), + } + if contextTag != "" { + switch s := strings.ToLower(contextTag); s { + case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "title", "textarea", "xmp": + z.rawTag = s + } + } + return z +} diff --git a/vendor/golang.org/x/net/http2/databuffer.go b/vendor/golang.org/x/net/http2/databuffer.go index a3067f8de7..e6f55cbd16 100644 --- a/vendor/golang.org/x/net/http2/databuffer.go +++ b/vendor/golang.org/x/net/http2/databuffer.go @@ -20,41 +20,44 @@ import ( // TODO: Benchmark to determine if the pools are necessary. The GC may have // improved enough that we can instead allocate chunks like this: // make([]byte, max(16<<10, expectedBytesRemaining)) -var ( - dataChunkSizeClasses = []int{ - 1 << 10, - 2 << 10, - 4 << 10, - 8 << 10, - 16 << 10, - } - dataChunkPools = [...]sync.Pool{ - {New: func() interface{} { return make([]byte, 1<<10) }}, - {New: func() interface{} { return make([]byte, 2<<10) }}, - {New: func() interface{} { return make([]byte, 4<<10) }}, - {New: func() interface{} { return make([]byte, 8<<10) }}, - {New: func() interface{} { return make([]byte, 16<<10) }}, - } -) +var dataChunkPools = [...]sync.Pool{ + {New: func() interface{} { return new([1 << 10]byte) }}, + {New: func() interface{} { return new([2 << 10]byte) }}, + {New: func() interface{} { return new([4 << 10]byte) }}, + {New: func() interface{} { return new([8 << 10]byte) }}, + {New: func() interface{} { return new([16 << 10]byte) }}, +} func getDataBufferChunk(size int64) []byte { - i := 0 - for ; i < len(dataChunkSizeClasses)-1; i++ { - if size <= int64(dataChunkSizeClasses[i]) { - break - } + switch { + case size <= 1<<10: + return dataChunkPools[0].Get().(*[1 << 10]byte)[:] + case size <= 2<<10: + return dataChunkPools[1].Get().(*[2 << 10]byte)[:] + case size <= 4<<10: + return dataChunkPools[2].Get().(*[4 << 10]byte)[:] + case size <= 8<<10: + return dataChunkPools[3].Get().(*[8 << 10]byte)[:] + default: + return dataChunkPools[4].Get().(*[16 << 10]byte)[:] } - return dataChunkPools[i].Get().([]byte) } func putDataBufferChunk(p []byte) { - for i, n := range dataChunkSizeClasses { - if len(p) == n { - dataChunkPools[i].Put(p) - return - } + switch len(p) { + case 1 << 10: + dataChunkPools[0].Put((*[1 << 10]byte)(p)) + case 2 << 10: + dataChunkPools[1].Put((*[2 << 10]byte)(p)) + case 4 << 10: + dataChunkPools[2].Put((*[4 << 10]byte)(p)) + case 8 << 10: + dataChunkPools[3].Put((*[8 << 10]byte)(p)) + case 16 << 10: + dataChunkPools[4].Put((*[16 << 10]byte)(p)) + default: + panic(fmt.Sprintf("unexpected buffer len=%v", len(p))) } - panic(fmt.Sprintf("unexpected buffer len=%v", len(p))) } // dataBuffer is an io.ReadWriter backed by a list of data chunks. diff --git a/vendor/golang.org/x/net/http2/go111.go b/vendor/golang.org/x/net/http2/go111.go deleted file mode 100644 index 5bf62b032e..0000000000 --- a/vendor/golang.org/x/net/http2/go111.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
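The databuffer change above replaces a slice-typed sync.Pool with pools of pointer-to-array values: putting a plain []byte into a sync.Pool allocates a fresh interface value on every Put, whereas a single *[N]byte pointer does not. A minimal sketch of the pattern, using a hypothetical single 4 KiB chunk size rather than the size classes in the real code:

package main

import "sync"

// chunkPool hands out fixed-size 4 KiB chunks without allocating on Put:
// storing *[4096]byte in the pool avoids boxing a slice header into an
// interface value each time a chunk is returned.
var chunkPool = sync.Pool{
	New: func() interface{} { return new([4096]byte) },
}

func getChunk() []byte  { return chunkPool.Get().(*[4096]byte)[:] }
func putChunk(p []byte) { chunkPool.Put((*[4096]byte)(p)) } // requires Go 1.17+ slice-to-array-pointer conversion

func main() {
	buf := getChunk()
	copy(buf, "hello")
	putChunk(buf)
}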
- -//go:build go1.11 -// +build go1.11 - -package http2 - -import ( - "net/http/httptrace" - "net/textproto" -) - -func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { - return trace != nil && trace.WroteHeaderField != nil -} - -func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { - if trace != nil && trace.WroteHeaderField != nil { - trace.WroteHeaderField(k, []string{v}) - } -} - -func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { - if trace != nil { - return trace.Got1xxResponse - } - return nil -} diff --git a/vendor/golang.org/x/net/http2/go115.go b/vendor/golang.org/x/net/http2/go115.go deleted file mode 100644 index 908af1ab93..0000000000 --- a/vendor/golang.org/x/net/http2/go115.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.15 -// +build go1.15 - -package http2 - -import ( - "context" - "crypto/tls" -) - -// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS -// connection. -func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) { - dialer := &tls.Dialer{ - Config: cfg, - } - cn, err := dialer.DialContext(ctx, network, addr) - if err != nil { - return nil, err - } - tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed - return tlsCn, nil -} diff --git a/vendor/golang.org/x/net/http2/go118.go b/vendor/golang.org/x/net/http2/go118.go deleted file mode 100644 index aca4b2b31a..0000000000 --- a/vendor/golang.org/x/net/http2/go118.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.18 -// +build go1.18 - -package http2 - -import ( - "crypto/tls" - "net" -) - -func tlsUnderlyingConn(tc *tls.Conn) net.Conn { - return tc.NetConn() -} diff --git a/vendor/golang.org/x/net/http2/not_go111.go b/vendor/golang.org/x/net/http2/not_go111.go deleted file mode 100644 index cc0baa8197..0000000000 --- a/vendor/golang.org/x/net/http2/not_go111.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.11 -// +build !go1.11 - -package http2 - -import ( - "net/http/httptrace" - "net/textproto" -) - -func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { return false } - -func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {} - -func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { - return nil -} diff --git a/vendor/golang.org/x/net/http2/not_go115.go b/vendor/golang.org/x/net/http2/not_go115.go deleted file mode 100644 index e6c04cf7ac..0000000000 --- a/vendor/golang.org/x/net/http2/not_go115.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.15 -// +build !go1.15 - -package http2 - -import ( - "context" - "crypto/tls" -) - -// dialTLSWithContext opens a TLS connection. 
-func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) { - cn, err := tls.Dial(network, addr, cfg) - if err != nil { - return nil, err - } - if err := cn.Handshake(); err != nil { - return nil, err - } - if cfg.InsecureSkipVerify { - return cn, nil - } - if err := cn.VerifyHostname(cfg.ServerName); err != nil { - return nil, err - } - return cn, nil -} diff --git a/vendor/golang.org/x/net/http2/not_go118.go b/vendor/golang.org/x/net/http2/not_go118.go deleted file mode 100644 index eab532c96b..0000000000 --- a/vendor/golang.org/x/net/http2/not_go118.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.18 -// +build !go1.18 - -package http2 - -import ( - "crypto/tls" - "net" -) - -func tlsUnderlyingConn(tc *tls.Conn) net.Conn { - return nil -} diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 02c88b6b3e..ae94c6408d 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -2549,7 +2549,6 @@ type responseWriterState struct { wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. sentHeader bool // have we sent the header frame? handlerDone bool // handler has finished - dirty bool // a Write failed; don't reuse this responseWriterState sentContentLen int64 // non-zero if handler set a Content-Length header wroteBytes int64 @@ -2669,7 +2668,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { date: date, }) if err != nil { - rws.dirty = true return 0, err } if endStream { @@ -2690,7 +2688,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { if len(p) > 0 || endStream { // only send a 0 byte DATA frame if we're ending the stream. if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { - rws.dirty = true return 0, err } } @@ -2702,9 +2699,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { trailers: rws.trailers, endStream: true, }) - if err != nil { - rws.dirty = true - } return len(p), err } return len(p), nil @@ -2920,14 +2914,12 @@ func (rws *responseWriterState) writeHeader(code int) { h.Del("Transfer-Encoding") } - if rws.conn.writeHeaders(rws.stream, &writeResHeaders{ + rws.conn.writeHeaders(rws.stream, &writeResHeaders{ streamID: rws.stream.id, httpResCode: code, h: h, endStream: rws.handlerDone && !rws.hasTrailers(), - }) != nil { - rws.dirty = true - } + }) return } @@ -2992,19 +2984,10 @@ func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, func (w *responseWriter) handlerDone() { rws := w.rws - dirty := rws.dirty rws.handlerDone = true w.Flush() w.rws = nil - if !dirty { - // Only recycle the pool if all prior Write calls to - // the serverConn goroutine completed successfully. If - // they returned earlier due to resets from the peer - // there might still be write goroutines outstanding - // from the serverConn referencing the rws memory. See - // issue 20704. - responseWriterStatePool.Put(rws) - } + responseWriterStatePool.Put(rws) } // Push errors. 
@@ -3187,6 +3170,7 @@ func (sc *serverConn) startPush(msg *startPushRequest) { panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err)) } + sc.curHandlers++ go sc.runHandler(rw, req, sc.handler.ServeHTTP) return promisedID, nil } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 4515b22c4a..df578b86c6 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -1018,7 +1018,7 @@ func (cc *ClientConn) forceCloseConn() { if !ok { return } - if nc := tlsUnderlyingConn(tc); nc != nil { + if nc := tc.NetConn(); nc != nil { nc.Close() } } @@ -3201,3 +3201,34 @@ func traceFirstResponseByte(trace *httptrace.ClientTrace) { trace.GotFirstResponseByte() } } + +func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { + return trace != nil && trace.WroteHeaderField != nil +} + +func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { + if trace != nil && trace.WroteHeaderField != nil { + trace.WroteHeaderField(k, []string{v}) + } +} + +func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { + if trace != nil { + return trace.Got1xxResponse + } + return nil +} + +// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS +// connection. +func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) { + dialer := &tls.Dialer{ + Config: cfg, + } + cn, err := dialer.DialContext(ctx, network, addr) + if err != nil { + return nil, err + } + tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed + return tlsCn, nil +} diff --git a/vendor/golang.org/x/net/idna/go118.go b/vendor/golang.org/x/net/idna/go118.go index c5c4338dbe..712f1ad839 100644 --- a/vendor/golang.org/x/net/idna/go118.go +++ b/vendor/golang.org/x/net/idna/go118.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. //go:build go1.18 -// +build go1.18 package idna diff --git a/vendor/golang.org/x/net/idna/idna10.0.0.go b/vendor/golang.org/x/net/idna/idna10.0.0.go index 64ccf85feb..7b37178847 100644 --- a/vendor/golang.org/x/net/idna/idna10.0.0.go +++ b/vendor/golang.org/x/net/idna/idna10.0.0.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. //go:build go1.10 -// +build go1.10 // Package idna implements IDNA2008 using the compatibility processing // defined by UTS (Unicode Technical Standard) #46, which defines a standard to diff --git a/vendor/golang.org/x/net/idna/idna9.0.0.go b/vendor/golang.org/x/net/idna/idna9.0.0.go index ee1698cefb..cc6a892a4a 100644 --- a/vendor/golang.org/x/net/idna/idna9.0.0.go +++ b/vendor/golang.org/x/net/idna/idna9.0.0.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. //go:build !go1.10 -// +build !go1.10 // Package idna implements IDNA2008 using the compatibility processing // defined by UTS (Unicode Technical Standard) #46, which defines a standard to diff --git a/vendor/golang.org/x/net/idna/pre_go118.go b/vendor/golang.org/x/net/idna/pre_go118.go index 3aaccab1c5..40e74bb3d2 100644 --- a/vendor/golang.org/x/net/idna/pre_go118.go +++ b/vendor/golang.org/x/net/idna/pre_go118.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. 
//go:build !go1.18 -// +build !go1.18 package idna diff --git a/vendor/golang.org/x/net/idna/tables10.0.0.go b/vendor/golang.org/x/net/idna/tables10.0.0.go index d1d62ef459..c6c2bf10a6 100644 --- a/vendor/golang.org/x/net/idna/tables10.0.0.go +++ b/vendor/golang.org/x/net/idna/tables10.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.10 && !go1.13 -// +build go1.10,!go1.13 package idna diff --git a/vendor/golang.org/x/net/idna/tables11.0.0.go b/vendor/golang.org/x/net/idna/tables11.0.0.go index 167efba712..76789393cc 100644 --- a/vendor/golang.org/x/net/idna/tables11.0.0.go +++ b/vendor/golang.org/x/net/idna/tables11.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.13 && !go1.14 -// +build go1.13,!go1.14 package idna diff --git a/vendor/golang.org/x/net/idna/tables12.0.0.go b/vendor/golang.org/x/net/idna/tables12.0.0.go index ab40f7bcc3..0600cd2ae5 100644 --- a/vendor/golang.org/x/net/idna/tables12.0.0.go +++ b/vendor/golang.org/x/net/idna/tables12.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.14 && !go1.16 -// +build go1.14,!go1.16 package idna diff --git a/vendor/golang.org/x/net/idna/tables13.0.0.go b/vendor/golang.org/x/net/idna/tables13.0.0.go index 66701eadfb..2fb768ef6d 100644 --- a/vendor/golang.org/x/net/idna/tables13.0.0.go +++ b/vendor/golang.org/x/net/idna/tables13.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.16 && !go1.21 -// +build go1.16,!go1.21 package idna diff --git a/vendor/golang.org/x/net/idna/tables15.0.0.go b/vendor/golang.org/x/net/idna/tables15.0.0.go index 40033778f0..5ff05fe1af 100644 --- a/vendor/golang.org/x/net/idna/tables15.0.0.go +++ b/vendor/golang.org/x/net/idna/tables15.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.21 -// +build go1.21 package idna diff --git a/vendor/golang.org/x/net/idna/tables9.0.0.go b/vendor/golang.org/x/net/idna/tables9.0.0.go index 4074b5332e..0f25e84ca2 100644 --- a/vendor/golang.org/x/net/idna/tables9.0.0.go +++ b/vendor/golang.org/x/net/idna/tables9.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build !go1.10 -// +build !go1.10 package idna diff --git a/vendor/golang.org/x/net/idna/trie12.0.0.go b/vendor/golang.org/x/net/idna/trie12.0.0.go index bb63f904b3..8a75b96673 100644 --- a/vendor/golang.org/x/net/idna/trie12.0.0.go +++ b/vendor/golang.org/x/net/idna/trie12.0.0.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. //go:build !go1.16 -// +build !go1.16 package idna diff --git a/vendor/golang.org/x/net/idna/trie13.0.0.go b/vendor/golang.org/x/net/idna/trie13.0.0.go index 7d68a8dc13..fa45bb9074 100644 --- a/vendor/golang.org/x/net/idna/trie13.0.0.go +++ b/vendor/golang.org/x/net/idna/trie13.0.0.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. 
//go:build go1.16 -// +build go1.16 package idna diff --git a/vendor/golang.org/x/oauth2/deviceauth.go b/vendor/golang.org/x/oauth2/deviceauth.go new file mode 100644 index 0000000000..e99c92f39c --- /dev/null +++ b/vendor/golang.org/x/oauth2/deviceauth.go @@ -0,0 +1,198 @@ +package oauth2 + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "time" + + "golang.org/x/oauth2/internal" +) + +// https://datatracker.ietf.org/doc/html/rfc8628#section-3.5 +const ( + errAuthorizationPending = "authorization_pending" + errSlowDown = "slow_down" + errAccessDenied = "access_denied" + errExpiredToken = "expired_token" +) + +// DeviceAuthResponse describes a successful RFC 8628 Device Authorization Response +// https://datatracker.ietf.org/doc/html/rfc8628#section-3.2 +type DeviceAuthResponse struct { + // DeviceCode + DeviceCode string `json:"device_code"` + // UserCode is the code the user should enter at the verification uri + UserCode string `json:"user_code"` + // VerificationURI is where user should enter the user code + VerificationURI string `json:"verification_uri"` + // VerificationURIComplete (if populated) includes the user code in the verification URI. This is typically shown to the user in non-textual form, such as a QR code. + VerificationURIComplete string `json:"verification_uri_complete,omitempty"` + // Expiry is when the device code and user code expire + Expiry time.Time `json:"expires_in,omitempty"` + // Interval is the duration in seconds that Poll should wait between requests + Interval int64 `json:"interval,omitempty"` +} + +func (d DeviceAuthResponse) MarshalJSON() ([]byte, error) { + type Alias DeviceAuthResponse + var expiresIn int64 + if !d.Expiry.IsZero() { + expiresIn = int64(time.Until(d.Expiry).Seconds()) + } + return json.Marshal(&struct { + ExpiresIn int64 `json:"expires_in,omitempty"` + *Alias + }{ + ExpiresIn: expiresIn, + Alias: (*Alias)(&d), + }) + +} + +func (c *DeviceAuthResponse) UnmarshalJSON(data []byte) error { + type Alias DeviceAuthResponse + aux := &struct { + ExpiresIn int64 `json:"expires_in"` + // workaround misspelling of verification_uri + VerificationURL string `json:"verification_url"` + *Alias + }{ + Alias: (*Alias)(c), + } + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + if aux.ExpiresIn != 0 { + c.Expiry = time.Now().UTC().Add(time.Second * time.Duration(aux.ExpiresIn)) + } + if c.VerificationURI == "" { + c.VerificationURI = aux.VerificationURL + } + return nil +} + +// DeviceAuth returns a device auth struct which contains a device code +// and authorization information provided for users to enter on another device. 
+func (c *Config) DeviceAuth(ctx context.Context, opts ...AuthCodeOption) (*DeviceAuthResponse, error) { + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.1 + v := url.Values{ + "client_id": {c.ClientID}, + } + if len(c.Scopes) > 0 { + v.Set("scope", strings.Join(c.Scopes, " ")) + } + for _, opt := range opts { + opt.setValue(v) + } + return retrieveDeviceAuth(ctx, c, v) +} + +func retrieveDeviceAuth(ctx context.Context, c *Config, v url.Values) (*DeviceAuthResponse, error) { + if c.Endpoint.DeviceAuthURL == "" { + return nil, errors.New("endpoint missing DeviceAuthURL") + } + + req, err := http.NewRequest("POST", c.Endpoint.DeviceAuthURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + req.Header.Set("Accept", "application/json") + + t := time.Now() + r, err := internal.ContextClient(ctx).Do(req) + if err != nil { + return nil, err + } + + body, err := io.ReadAll(io.LimitReader(r.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot auth device: %v", err) + } + if code := r.StatusCode; code < 200 || code > 299 { + return nil, &RetrieveError{ + Response: r, + Body: body, + } + } + + da := &DeviceAuthResponse{} + err = json.Unmarshal(body, &da) + if err != nil { + return nil, fmt.Errorf("unmarshal %s", err) + } + + if !da.Expiry.IsZero() { + // Make a small adjustment to account for time taken by the request + da.Expiry = da.Expiry.Add(-time.Since(t)) + } + + return da, nil +} + +// DeviceAccessToken polls the server to exchange a device code for a token. +func (c *Config) DeviceAccessToken(ctx context.Context, da *DeviceAuthResponse, opts ...AuthCodeOption) (*Token, error) { + if !da.Expiry.IsZero() { + var cancel context.CancelFunc + ctx, cancel = context.WithDeadline(ctx, da.Expiry) + defer cancel() + } + + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.4 + v := url.Values{ + "client_id": {c.ClientID}, + "grant_type": {"urn:ietf:params:oauth:grant-type:device_code"}, + "device_code": {da.DeviceCode}, + } + if len(c.Scopes) > 0 { + v.Set("scope", strings.Join(c.Scopes, " ")) + } + for _, opt := range opts { + opt.setValue(v) + } + + // "If no value is provided, clients MUST use 5 as the default." + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.2 + interval := da.Interval + if interval == 0 { + interval = 5 + } + + ticker := time.NewTicker(time.Duration(interval) * time.Second) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-ticker.C: + tok, err := retrieveToken(ctx, c, v) + if err == nil { + return tok, nil + } + + e, ok := err.(*RetrieveError) + if !ok { + return nil, err + } + switch e.ErrorCode { + case errSlowDown: + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.5 + // "the interval MUST be increased by 5 seconds for this and all subsequent requests" + interval += 5 + ticker.Reset(time.Duration(interval) * time.Second) + case errAuthorizationPending: + // Do nothing. + case errAccessDenied, errExpiredToken: + fallthrough + default: + return tok, err + } + } + } +} diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index cc7c98c25d..90a2c3d6dc 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -75,8 +75,9 @@ type TokenSource interface { // Endpoint represents an OAuth 2.0 provider's authorization and token // endpoint URLs. 
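Taken together, DeviceAuth and DeviceAccessToken implement the full RFC 8628 flow: request a device/user code, show the verification URI to the user, then poll until the token is issued. A minimal sketch, assuming a Config with a ClientID and an Endpoint whose DeviceAuthURL and TokenURL point at a real provider (the identifiers and URLs below are placeholders):

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2"
)

func main() {
	cfg := &oauth2.Config{
		ClientID: "my-client-id", // placeholder
		Scopes:   []string{"openid"},
		Endpoint: oauth2.Endpoint{
			DeviceAuthURL: "https://example.com/oauth/device/code", // placeholder
			TokenURL:      "https://example.com/oauth/token",       // placeholder
		},
	}

	ctx := context.Background()
	da, err := cfg.DeviceAuth(ctx)
	if err != nil {
		log.Fatalf("device auth request: %v", err)
	}
	fmt.Printf("Visit %s and enter code %s\n", da.VerificationURI, da.UserCode)

	// DeviceAccessToken polls the token endpoint every da.Interval seconds
	// (default 5) until the user approves, the code expires, or ctx ends.
	tok, err := cfg.DeviceAccessToken(ctx, da)
	if err != nil {
		log.Fatalf("device token: %v", err)
	}
	fmt.Println("access token obtained, expires:", tok.Expiry)
}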
type Endpoint struct { - AuthURL string - TokenURL string + AuthURL string + DeviceAuthURL string + TokenURL string // AuthStyle optionally specifies how the endpoint wants the // client ID & client secret sent. The zero value means to @@ -143,15 +144,19 @@ func SetAuthURLParam(key, value string) AuthCodeOption { // AuthCodeURL returns a URL to OAuth 2.0 provider's consent page // that asks for permissions for the required scopes explicitly. // -// State is a token to protect the user from CSRF attacks. You must -// always provide a non-empty string and validate that it matches the -// state query parameter on your redirect callback. -// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info. +// State is an opaque value used by the client to maintain state between the +// request and callback. The authorization server includes this value when +// redirecting the user agent back to the client. // // Opts may include AccessTypeOnline or AccessTypeOffline, as well // as ApprovalForce. -// It can also be used to pass the PKCE challenge. -// See https://www.oauth.com/oauth2-servers/pkce/ for more info. +// +// To protect against CSRF attacks, opts should include a PKCE challenge +// (S256ChallengeOption). Not all servers support PKCE. An alternative is to +// generate a random state parameter and verify it after exchange. +// See https://datatracker.ietf.org/doc/html/rfc6749#section-10.12 (predating +// PKCE), https://www.oauth.com/oauth2-servers/pkce/ and +// https://www.ietf.org/archive/id/draft-ietf-oauth-v2-1-09.html#name-cross-site-request-forgery (describing both approaches) func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { var buf bytes.Buffer buf.WriteString(c.Endpoint.AuthURL) @@ -166,7 +171,6 @@ func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { v.Set("scope", strings.Join(c.Scopes, " ")) } if state != "" { - // TODO(light): Docs say never to omit state; don't allow empty. v.Set("state", state) } for _, opt := range opts { @@ -211,10 +215,11 @@ func (c *Config) PasswordCredentialsToken(ctx context.Context, username, passwor // The provided context optionally controls which HTTP client is used. See the HTTPClient variable. // // The code will be in the *http.Request.FormValue("code"). Before -// calling Exchange, be sure to validate FormValue("state"). +// calling Exchange, be sure to validate FormValue("state") if you are +// using it to protect against CSRF attacks. // -// Opts may include the PKCE verifier code if previously used in AuthCodeURL. -// See https://www.oauth.com/oauth2-servers/pkce/ for more info. +// If using PKCE to protect against CSRF attacks, opts should include a +// VerifierOption. func (c *Config) Exchange(ctx context.Context, code string, opts ...AuthCodeOption) (*Token, error) { v := url.Values{ "grant_type": {"authorization_code"}, diff --git a/vendor/golang.org/x/oauth2/pkce.go b/vendor/golang.org/x/oauth2/pkce.go new file mode 100644 index 0000000000..50593b6dfe --- /dev/null +++ b/vendor/golang.org/x/oauth2/pkce.go @@ -0,0 +1,68 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+package oauth2 + +import ( + "crypto/rand" + "crypto/sha256" + "encoding/base64" + "net/url" +) + +const ( + codeChallengeKey = "code_challenge" + codeChallengeMethodKey = "code_challenge_method" + codeVerifierKey = "code_verifier" +) + +// GenerateVerifier generates a PKCE code verifier with 32 octets of randomness. +// This follows recommendations in RFC 7636. +// +// A fresh verifier should be generated for each authorization. +// S256ChallengeOption(verifier) should then be passed to Config.AuthCodeURL +// (or Config.DeviceAccess) and VerifierOption(verifier) to Config.Exchange +// (or Config.DeviceAccessToken). +func GenerateVerifier() string { + // "RECOMMENDED that the output of a suitable random number generator be + // used to create a 32-octet sequence. The octet sequence is then + // base64url-encoded to produce a 43-octet URL-safe string to use as the + // code verifier." + // https://datatracker.ietf.org/doc/html/rfc7636#section-4.1 + data := make([]byte, 32) + if _, err := rand.Read(data); err != nil { + panic(err) + } + return base64.RawURLEncoding.EncodeToString(data) +} + +// VerifierOption returns a PKCE code verifier AuthCodeOption. It should be +// passed to Config.Exchange or Config.DeviceAccessToken only. +func VerifierOption(verifier string) AuthCodeOption { + return setParam{k: codeVerifierKey, v: verifier} +} + +// S256ChallengeFromVerifier returns a PKCE code challenge derived from verifier with method S256. +// +// Prefer to use S256ChallengeOption where possible. +func S256ChallengeFromVerifier(verifier string) string { + sha := sha256.Sum256([]byte(verifier)) + return base64.RawURLEncoding.EncodeToString(sha[:]) +} + +// S256ChallengeOption derives a PKCE code challenge derived from verifier with +// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAccess +// only. +func S256ChallengeOption(verifier string) AuthCodeOption { + return challengeOption{ + challenge_method: "S256", + challenge: S256ChallengeFromVerifier(verifier), + } +} + +type challengeOption struct{ challenge_method, challenge string } + +func (p challengeOption) setValue(m url.Values) { + m.Set(codeChallengeMethodKey, p.challenge_method) + m.Set(codeChallengeKey, p.challenge) +} diff --git a/vendor/golang.org/x/sys/execabs/execabs_go118.go b/vendor/golang.org/x/sys/execabs/execabs_go118.go index 2000064a81..5627d70e39 100644 --- a/vendor/golang.org/x/sys/execabs/execabs_go118.go +++ b/vendor/golang.org/x/sys/execabs/execabs_go118.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !go1.19 -// +build !go1.19 package execabs diff --git a/vendor/golang.org/x/sys/execabs/execabs_go119.go b/vendor/golang.org/x/sys/execabs/execabs_go119.go index f364b34189..d60ab1b419 100644 --- a/vendor/golang.org/x/sys/execabs/execabs_go119.go +++ b/vendor/golang.org/x/sys/execabs/execabs_go119.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build go1.19 -// +build go1.19 package execabs diff --git a/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go b/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go index c9b69937a0..73687de748 100644 --- a/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go +++ b/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
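The new PKCE helpers slot into the existing authorization-code flow: generate a verifier, send its S256 challenge with the consent URL, then prove possession of the verifier at exchange time. A minimal sketch; the client ID, URLs, and the authorization code are assumed to come from the surrounding application:

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2"
)

func main() {
	cfg := &oauth2.Config{
		ClientID:    "my-client-id", // placeholder
		RedirectURL: "http://127.0.0.1:8080/callback",
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://example.com/oauth/authorize", // placeholder
			TokenURL: "https://example.com/oauth/token",     // placeholder
		},
	}

	// One fresh verifier per authorization; keep it until the exchange.
	verifier := oauth2.GenerateVerifier()

	// The challenge (not the verifier) goes into the consent URL.
	url := cfg.AuthCodeURL("state", oauth2.S256ChallengeOption(verifier))
	fmt.Println("Visit:", url)

	// After the redirect delivers ?code=..., prove possession of the verifier.
	code := "authorization-code-from-callback" // placeholder
	tok, err := cfg.Exchange(context.Background(), code, oauth2.VerifierOption(verifier))
	if err != nil {
		log.Fatalf("exchange: %v", err)
	}
	fmt.Println("token type:", tok.TokenType)
}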
//go:build go1.5 -// +build go1.5 package plan9 diff --git a/vendor/golang.org/x/sys/plan9/pwd_plan9.go b/vendor/golang.org/x/sys/plan9/pwd_plan9.go index 98bf56b732..fb94582184 100644 --- a/vendor/golang.org/x/sys/plan9/pwd_plan9.go +++ b/vendor/golang.org/x/sys/plan9/pwd_plan9.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !go1.5 -// +build !go1.5 package plan9 diff --git a/vendor/golang.org/x/sys/plan9/race.go b/vendor/golang.org/x/sys/plan9/race.go index 62377d2ff9..c02d9ed333 100644 --- a/vendor/golang.org/x/sys/plan9/race.go +++ b/vendor/golang.org/x/sys/plan9/race.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build plan9 && race -// +build plan9,race package plan9 diff --git a/vendor/golang.org/x/sys/plan9/race0.go b/vendor/golang.org/x/sys/plan9/race0.go index f8da30876d..7b15e15f65 100644 --- a/vendor/golang.org/x/sys/plan9/race0.go +++ b/vendor/golang.org/x/sys/plan9/race0.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build plan9 && !race -// +build plan9,!race package plan9 diff --git a/vendor/golang.org/x/sys/plan9/str.go b/vendor/golang.org/x/sys/plan9/str.go index 55fa8d025e..ba3e8ff8a6 100644 --- a/vendor/golang.org/x/sys/plan9/str.go +++ b/vendor/golang.org/x/sys/plan9/str.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build plan9 -// +build plan9 package plan9 diff --git a/vendor/golang.org/x/sys/plan9/syscall.go b/vendor/golang.org/x/sys/plan9/syscall.go index 67e5b0115c..d631fd664a 100644 --- a/vendor/golang.org/x/sys/plan9/syscall.go +++ b/vendor/golang.org/x/sys/plan9/syscall.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build plan9 -// +build plan9 // Package plan9 contains an interface to the low-level operating system // primitives. OS details vary depending on the underlying system, and diff --git a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go index 3f40b9bd74..f780d5c807 100644 --- a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go +++ b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build plan9 && 386 -// +build plan9,386 package plan9 diff --git a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go index 0e6a96aa4f..7de61065f6 100644 --- a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go +++ b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build plan9 && amd64 -// +build plan9,amd64 package plan9 diff --git a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go index 244c501b77..ea85780f03 100644 --- a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go +++ b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build plan9 && arm -// +build plan9,arm package plan9 diff --git a/vendor/golang.org/x/sys/unix/aliases.go b/vendor/golang.org/x/sys/unix/aliases.go index abc89c104a..e7d3df4bd3 100644 --- a/vendor/golang.org/x/sys/unix/aliases.go +++ b/vendor/golang.org/x/sys/unix/aliases.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) && go1.9 -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos -// +build go1.9 package unix diff --git a/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s b/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s index db9171c2e4..269e173ca4 100644 --- a/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s +++ b/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_386.s b/vendor/golang.org/x/sys/unix/asm_bsd_386.s index e0fcd9b3de..a4fcef0e0d 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_386.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_386.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (freebsd || netbsd || openbsd) && gc -// +build freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s index 2b99c349a2..1e63615c57 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin || dragonfly || freebsd || netbsd || openbsd) && gc -// +build darwin dragonfly freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_arm.s b/vendor/golang.org/x/sys/unix/asm_bsd_arm.s index d702d4adc7..6496c31008 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_arm.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_arm.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (freebsd || netbsd || openbsd) && gc -// +build freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s b/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s index fe36a7391a..4fd1f54daa 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin || freebsd || netbsd || openbsd) && gc -// +build darwin freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s b/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s index e5b9a84899..42f7eb9e47 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin || freebsd || netbsd || openbsd) && gc -// +build darwin freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s b/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s index d560019ea2..f8902667e9 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin || freebsd || netbsd || openbsd) && gc -// +build darwin freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_386.s b/vendor/golang.org/x/sys/unix/asm_linux_386.s index 8fd101d071..3b4734870d 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_386.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_386.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_amd64.s b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s index 7ed38e43c6..67e29f3178 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm.s b/vendor/golang.org/x/sys/unix/asm_linux_arm.s index 8ef1d51402..d6ae269ce1 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_arm.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_arm.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm64.s b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s index 98ae02760d..01e5e253c6 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_arm64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && arm64 && gc -// +build linux -// +build arm64 -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_loong64.s b/vendor/golang.org/x/sys/unix/asm_linux_loong64.s index 565357288a..2abf12f6e8 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_loong64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_loong64.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && loong64 && gc -// +build linux -// +build loong64 -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s index 21231d2ce1..f84bae7120 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (mips64 || mips64le) && gc -// +build linux -// +build mips64 mips64le -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s b/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s index 6783b26c60..f08f628077 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (mips || mipsle) && gc -// +build linux -// +build mips mipsle -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s index 19d4989344..bdfc024d2d 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (ppc64 || ppc64le) && gc -// +build linux -// +build ppc64 ppc64le -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s index e42eb81d58..2e8c996120 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build riscv64 && gc -// +build riscv64 -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_s390x.s b/vendor/golang.org/x/sys/unix/asm_linux_s390x.s index c46aab3395..2c394b11eb 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_s390x.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_s390x.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && s390x && gc -// +build linux -// +build s390x -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s index 5e7a1169c0..fab586a2c4 100644 --- a/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s +++ b/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s index f8c5394c1a..f949ec5476 100644 --- a/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_zos_s390x.s b/vendor/golang.org/x/sys/unix/asm_zos_s390x.s index 3b54e18581..2f67ba86d5 100644 --- a/vendor/golang.org/x/sys/unix/asm_zos_s390x.s +++ b/vendor/golang.org/x/sys/unix/asm_zos_s390x.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x && gc -// +build zos -// +build s390x -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/cap_freebsd.go b/vendor/golang.org/x/sys/unix/cap_freebsd.go index 0b7c6adb86..a08657890f 100644 --- a/vendor/golang.org/x/sys/unix/cap_freebsd.go +++ b/vendor/golang.org/x/sys/unix/cap_freebsd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build freebsd -// +build freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/constants.go b/vendor/golang.org/x/sys/unix/constants.go index 394a3965b6..6fb7cb77d0 100644 --- a/vendor/golang.org/x/sys/unix/constants.go +++ b/vendor/golang.org/x/sys/unix/constants.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/dev_aix_ppc.go b/vendor/golang.org/x/sys/unix/dev_aix_ppc.go index 65a998508d..d785134617 100644 --- a/vendor/golang.org/x/sys/unix/dev_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/dev_aix_ppc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix && ppc -// +build aix,ppc // Functions to access/create device major and minor numbers matching the // encoding used by AIX. diff --git a/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go b/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go index 8fc08ad0aa..623a5e6973 100644 --- a/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix && ppc64 -// +build aix,ppc64 // Functions to access/create device major and minor numbers matching the // encoding used AIX. 
diff --git a/vendor/golang.org/x/sys/unix/dev_zos.go b/vendor/golang.org/x/sys/unix/dev_zos.go index a388e59a0e..bb6a64fe92 100644 --- a/vendor/golang.org/x/sys/unix/dev_zos.go +++ b/vendor/golang.org/x/sys/unix/dev_zos.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x // Functions to access/create device major and minor numbers matching the // encoding used by z/OS. diff --git a/vendor/golang.org/x/sys/unix/dirent.go b/vendor/golang.org/x/sys/unix/dirent.go index 2499f977b0..1ebf117826 100644 --- a/vendor/golang.org/x/sys/unix/dirent.go +++ b/vendor/golang.org/x/sys/unix/dirent.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/endian_big.go b/vendor/golang.org/x/sys/unix/endian_big.go index a520265576..1095fd31d6 100644 --- a/vendor/golang.org/x/sys/unix/endian_big.go +++ b/vendor/golang.org/x/sys/unix/endian_big.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. // //go:build armbe || arm64be || m68k || mips || mips64 || mips64p32 || ppc || ppc64 || s390 || s390x || shbe || sparc || sparc64 -// +build armbe arm64be m68k mips mips64 mips64p32 ppc ppc64 s390 s390x shbe sparc sparc64 package unix diff --git a/vendor/golang.org/x/sys/unix/endian_little.go b/vendor/golang.org/x/sys/unix/endian_little.go index b0f2bc4ae3..b9f0e277b1 100644 --- a/vendor/golang.org/x/sys/unix/endian_little.go +++ b/vendor/golang.org/x/sys/unix/endian_little.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. // //go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh -// +build 386 amd64 amd64p32 alpha arm arm64 loong64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh package unix diff --git a/vendor/golang.org/x/sys/unix/env_unix.go b/vendor/golang.org/x/sys/unix/env_unix.go index 29ccc4d133..a96da71f47 100644 --- a/vendor/golang.org/x/sys/unix/env_unix.go +++ b/vendor/golang.org/x/sys/unix/env_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos // Unix environment variables. diff --git a/vendor/golang.org/x/sys/unix/epoll_zos.go b/vendor/golang.org/x/sys/unix/epoll_zos.go index cedaf7e024..7753fddea8 100644 --- a/vendor/golang.org/x/sys/unix/epoll_zos.go +++ b/vendor/golang.org/x/sys/unix/epoll_zos.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x package unix diff --git a/vendor/golang.org/x/sys/unix/fcntl.go b/vendor/golang.org/x/sys/unix/fcntl.go index e9b991258c..6200876fb2 100644 --- a/vendor/golang.org/x/sys/unix/fcntl.go +++ b/vendor/golang.org/x/sys/unix/fcntl.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build dragonfly || freebsd || linux || netbsd || openbsd -// +build dragonfly freebsd linux netbsd openbsd +//go:build dragonfly || freebsd || linux || netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go b/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go index 29d44808b1..13b4acd5c6 100644 --- a/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go +++ b/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build (linux && 386) || (linux && arm) || (linux && mips) || (linux && mipsle) || (linux && ppc) -// +build linux,386 linux,arm linux,mips linux,mipsle linux,ppc package unix diff --git a/vendor/golang.org/x/sys/unix/fdset.go b/vendor/golang.org/x/sys/unix/fdset.go index a8068f94f2..9e83d18cd0 100644 --- a/vendor/golang.org/x/sys/unix/fdset.go +++ b/vendor/golang.org/x/sys/unix/fdset.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/fstatfs_zos.go b/vendor/golang.org/x/sys/unix/fstatfs_zos.go index e377cc9f49..c8bde601e7 100644 --- a/vendor/golang.org/x/sys/unix/fstatfs_zos.go +++ b/vendor/golang.org/x/sys/unix/fstatfs_zos.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x package unix diff --git a/vendor/golang.org/x/sys/unix/gccgo.go b/vendor/golang.org/x/sys/unix/gccgo.go index b06f52d748..aca5721ddc 100644 --- a/vendor/golang.org/x/sys/unix/gccgo.go +++ b/vendor/golang.org/x/sys/unix/gccgo.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gccgo && !aix && !hurd -// +build gccgo,!aix,!hurd package unix diff --git a/vendor/golang.org/x/sys/unix/gccgo_c.c b/vendor/golang.org/x/sys/unix/gccgo_c.c index f98a1c542f..d468b7b47f 100644 --- a/vendor/golang.org/x/sys/unix/gccgo_c.c +++ b/vendor/golang.org/x/sys/unix/gccgo_c.c @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gccgo && !aix && !hurd -// +build gccgo,!aix,!hurd #include <errno.h> #include <stdint.h> diff --git a/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go index e60e49a3d9..972d61bd75 100644 --- a/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gccgo && linux && amd64 -// +build gccgo,linux,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/ifreq_linux.go b/vendor/golang.org/x/sys/unix/ifreq_linux.go index 15721a5104..848840ae4c 100644 --- a/vendor/golang.org/x/sys/unix/ifreq_linux.go +++ b/vendor/golang.org/x/sys/unix/ifreq_linux.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build linux -// +build linux package unix diff --git a/vendor/golang.org/x/sys/unix/ioctl_linux.go b/vendor/golang.org/x/sys/unix/ioctl_linux.go index 0d12c0851a..dbe680eab8 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_linux.go +++ b/vendor/golang.org/x/sys/unix/ioctl_linux.go @@ -231,3 +231,8 @@ func IoctlLoopGetStatus64(fd int) (*LoopInfo64, error) { func IoctlLoopSetStatus64(fd int, value *LoopInfo64) error { return ioctlPtr(fd, LOOP_SET_STATUS64, unsafe.Pointer(value)) } + +// IoctlLoopConfigure configures all loop device parameters in a single step +func IoctlLoopConfigure(fd int, value *LoopConfig) error { + return ioctlPtr(fd, LOOP_CONFIGURE, unsafe.Pointer(value)) +} diff --git a/vendor/golang.org/x/sys/unix/ioctl_signed.go b/vendor/golang.org/x/sys/unix/ioctl_signed.go index 7def9580e6..5b0759bd86 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_signed.go +++ b/vendor/golang.org/x/sys/unix/ioctl_signed.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || solaris -// +build aix solaris package unix diff --git a/vendor/golang.org/x/sys/unix/ioctl_unsigned.go b/vendor/golang.org/x/sys/unix/ioctl_unsigned.go index 649913d1ea..20f470b9d0 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_unsigned.go +++ b/vendor/golang.org/x/sys/unix/ioctl_unsigned.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin || dragonfly || freebsd || hurd || linux || netbsd || openbsd -// +build darwin dragonfly freebsd hurd linux netbsd openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ioctl_zos.go b/vendor/golang.org/x/sys/unix/ioctl_zos.go index cdc21bf76d..c8b2a750f8 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_zos.go +++ b/vendor/golang.org/x/sys/unix/ioctl_zos.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x package unix diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 47fa6a7ebd..6202638bae 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -519,6 +519,7 @@ ccflags="$@" $2 ~ /^LOCK_(SH|EX|NB|UN)$/ || $2 ~ /^LO_(KEY|NAME)_SIZE$/ || $2 ~ /^LOOP_(CLR|CTL|GET|SET)_/ || + $2 == "LOOP_CONFIGURE" || $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MREMAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT|UDP)_/ || $2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ || $2 ~ /^NFC_.*_(MAX)?SIZE$/ || @@ -560,7 +561,7 @@ ccflags="$@" $2 ~ /^RLIMIT_(AS|CORE|CPU|DATA|FSIZE|LOCKS|MEMLOCK|MSGQUEUE|NICE|NOFILE|NPROC|RSS|RTPRIO|RTTIME|SIGPENDING|STACK)|RLIM_INFINITY/ || $2 ~ /^PRIO_(PROCESS|PGRP|USER)/ || $2 ~ /^CLONE_[A-Z_]+/ || - $2 !~ /^(BPF_TIMEVAL|BPF_FIB_LOOKUP_[A-Z]+)$/ && + $2 !~ /^(BPF_TIMEVAL|BPF_FIB_LOOKUP_[A-Z]+|BPF_F_LINK)$/ && $2 ~ /^(BPF|DLT)_/ || $2 ~ /^AUDIT_/ || $2 ~ /^(CLOCK|TIMER)_/ || @@ -663,7 +664,6 @@ echo '// mkerrors.sh' "$@" echo '// Code generated by the command above; see README.md. DO NOT EDIT.' echo echo "//go:build ${GOARCH} && ${GOOS}" -echo "// +build ${GOARCH},${GOOS}" echo go tool cgo -godefs -- "$@" _const.go >_error.out cat _error.out | grep -vf _error.grep | grep -vf _signal.grep diff --git a/vendor/golang.org/x/sys/unix/mmap_nomremap.go b/vendor/golang.org/x/sys/unix/mmap_nomremap.go index ca0513632e..4b68e59780 100644 --- a/vendor/golang.org/x/sys/unix/mmap_nomremap.go +++ b/vendor/golang.org/x/sys/unix/mmap_nomremap.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build aix || darwin || dragonfly || freebsd || openbsd || solaris -// +build aix darwin dragonfly freebsd openbsd solaris package unix diff --git a/vendor/golang.org/x/sys/unix/mremap.go b/vendor/golang.org/x/sys/unix/mremap.go index fa93d0aa90..fd45fe529d 100644 --- a/vendor/golang.org/x/sys/unix/mremap.go +++ b/vendor/golang.org/x/sys/unix/mremap.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux || netbsd -// +build linux netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/pagesize_unix.go b/vendor/golang.org/x/sys/unix/pagesize_unix.go index 53f1b4c5b8..4d0a3430ed 100644 --- a/vendor/golang.org/x/sys/unix/pagesize_unix.go +++ b/vendor/golang.org/x/sys/unix/pagesize_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris // For Unix, get the pagesize from the runtime. diff --git a/vendor/golang.org/x/sys/unix/pledge_openbsd.go b/vendor/golang.org/x/sys/unix/pledge_openbsd.go index eb48294b27..6a09af53e6 100644 --- a/vendor/golang.org/x/sys/unix/pledge_openbsd.go +++ b/vendor/golang.org/x/sys/unix/pledge_openbsd.go @@ -8,54 +8,31 @@ import ( "errors" "fmt" "strconv" - "syscall" - "unsafe" ) // Pledge implements the pledge syscall. // -// The pledge syscall does not accept execpromises on OpenBSD releases -// before 6.3. -// -// execpromises must be empty when Pledge is called on OpenBSD -// releases predating 6.3, otherwise an error will be returned. +// This changes both the promises and execpromises; use PledgePromises or +// PledgeExecpromises to only change the promises or execpromises +// respectively. // // For more information see pledge(2). func Pledge(promises, execpromises string) error { - maj, min, err := majmin() - if err != nil { + if err := pledgeAvailable(); err != nil { return err } - err = pledgeAvailable(maj, min, execpromises) + pptr, err := BytePtrFromString(promises) if err != nil { return err } - pptr, err := syscall.BytePtrFromString(promises) + exptr, err := BytePtrFromString(execpromises) if err != nil { return err } - // This variable will hold either a nil unsafe.Pointer or - // an unsafe.Pointer to a string (execpromises). - var expr unsafe.Pointer - - // If we're running on OpenBSD > 6.2, pass execpromises to the syscall. - if maj > 6 || (maj == 6 && min > 2) { - exptr, err := syscall.BytePtrFromString(execpromises) - if err != nil { - return err - } - expr = unsafe.Pointer(exptr) - } - - _, _, e := syscall.Syscall(SYS_PLEDGE, uintptr(unsafe.Pointer(pptr)), uintptr(expr), 0) - if e != 0 { - return e - } - - return nil + return pledge(pptr, exptr) } // PledgePromises implements the pledge syscall. @@ -64,30 +41,16 @@ func Pledge(promises, execpromises string) error { // // For more information see pledge(2). func PledgePromises(promises string) error { - maj, min, err := majmin() - if err != nil { - return err - } - - err = pledgeAvailable(maj, min, "") - if err != nil { + if err := pledgeAvailable(); err != nil { return err } - // This variable holds the execpromises and is always nil. 
- var expr unsafe.Pointer - - pptr, err := syscall.BytePtrFromString(promises) + pptr, err := BytePtrFromString(promises) if err != nil { return err } - _, _, e := syscall.Syscall(SYS_PLEDGE, uintptr(unsafe.Pointer(pptr)), uintptr(expr), 0) - if e != 0 { - return e - } - - return nil + return pledge(pptr, nil) } // PledgeExecpromises implements the pledge syscall. @@ -96,30 +59,16 @@ func PledgePromises(promises string) error { // // For more information see pledge(2). func PledgeExecpromises(execpromises string) error { - maj, min, err := majmin() - if err != nil { + if err := pledgeAvailable(); err != nil { return err } - err = pledgeAvailable(maj, min, execpromises) + exptr, err := BytePtrFromString(execpromises) if err != nil { return err } - // This variable holds the promises and is always nil. - var pptr unsafe.Pointer - - exptr, err := syscall.BytePtrFromString(execpromises) - if err != nil { - return err - } - - _, _, e := syscall.Syscall(SYS_PLEDGE, uintptr(pptr), uintptr(unsafe.Pointer(exptr)), 0) - if e != 0 { - return e - } - - return nil + return pledge(nil, exptr) } // majmin returns major and minor version number for an OpenBSD system. @@ -147,16 +96,15 @@ func majmin() (major int, minor int, err error) { // pledgeAvailable checks for availability of the pledge(2) syscall // based on the running OpenBSD version. -func pledgeAvailable(maj, min int, execpromises string) error { - // If OpenBSD <= 5.9, pledge is not available. - if (maj == 5 && min != 9) || maj < 5 { - return fmt.Errorf("pledge syscall is not available on OpenBSD %d.%d", maj, min) +func pledgeAvailable() error { + maj, min, err := majmin() + if err != nil { + return err } - // If OpenBSD <= 6.2 and execpromises is not empty, - // return an error - execpromises is not available before 6.3 - if (maj < 6 || (maj == 6 && min <= 2)) && execpromises != "" { - return fmt.Errorf("cannot use execpromises on OpenBSD %d.%d", maj, min) + // Require OpenBSD 6.4 as a minimum. + if maj < 6 || (maj == 6 && min <= 3) { + return fmt.Errorf("cannot call Pledge on OpenBSD %d.%d", maj, min) } return nil diff --git a/vendor/golang.org/x/sys/unix/ptrace_darwin.go b/vendor/golang.org/x/sys/unix/ptrace_darwin.go index 463c3eff7f..3f0975f3de 100644 --- a/vendor/golang.org/x/sys/unix/ptrace_darwin.go +++ b/vendor/golang.org/x/sys/unix/ptrace_darwin.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin && !ios -// +build darwin,!ios package unix diff --git a/vendor/golang.org/x/sys/unix/ptrace_ios.go b/vendor/golang.org/x/sys/unix/ptrace_ios.go index ed0509a011..a4d35db5dc 100644 --- a/vendor/golang.org/x/sys/unix/ptrace_ios.go +++ b/vendor/golang.org/x/sys/unix/ptrace_ios.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build ios -// +build ios package unix diff --git a/vendor/golang.org/x/sys/unix/race.go b/vendor/golang.org/x/sys/unix/race.go index 6f6c5fec5a..714d2aae7c 100644 --- a/vendor/golang.org/x/sys/unix/race.go +++ b/vendor/golang.org/x/sys/unix/race.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin && race) || (linux && race) || (freebsd && race) -// +build darwin,race linux,race freebsd,race package unix diff --git a/vendor/golang.org/x/sys/unix/race0.go b/vendor/golang.org/x/sys/unix/race0.go index 706e1322ae..4a9f6634c9 100644 --- a/vendor/golang.org/x/sys/unix/race0.go +++ b/vendor/golang.org/x/sys/unix/race0.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
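The slimmed-down wrappers above keep the same public API; under the new pledgeAvailable check they simply refuse to run on anything older than OpenBSD 6.4. A minimal OpenBSD-only sketch of restricting a process to a promise set (the promise string and file path are just examples):

//go:build openbsd

package main

import (
	"log"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// Drop to stdio plus read-only filesystem access; later syscalls
	// outside these promises will terminate the process.
	if err := unix.PledgePromises("stdio rpath"); err != nil {
		log.Fatalf("pledge: %v", err)
	}

	data, err := os.ReadFile("/etc/resolv.conf") // still allowed under rpath
	if err != nil {
		log.Fatal(err)
	}
	os.Stdout.Write(data)
}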
//go:build aix || (darwin && !race) || (linux && !race) || (freebsd && !race) || netbsd || openbsd || solaris || dragonfly || zos -// +build aix darwin,!race linux,!race freebsd,!race netbsd openbsd solaris dragonfly zos package unix diff --git a/vendor/golang.org/x/sys/unix/readdirent_getdents.go b/vendor/golang.org/x/sys/unix/readdirent_getdents.go index 4d6257569e..dbd2b6ccb1 100644 --- a/vendor/golang.org/x/sys/unix/readdirent_getdents.go +++ b/vendor/golang.org/x/sys/unix/readdirent_getdents.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || dragonfly || freebsd || linux || netbsd || openbsd -// +build aix dragonfly freebsd linux netbsd openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go b/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go index 2a4ba47c45..130398b6b7 100644 --- a/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go +++ b/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin -// +build darwin package unix diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go index 3865943f6e..c3a62dbb1b 100644 --- a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos // Socket control messages diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go index 0840fe4a57..4a1eab37ec 100644 --- a/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/syscall.go b/vendor/golang.org/x/sys/unix/syscall.go index 63e8c83831..5ea74da982 100644 --- a/vendor/golang.org/x/sys/unix/syscall.go +++ b/vendor/golang.org/x/sys/unix/syscall.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos // Package unix contains an interface to the low-level operating system // primitives. OS details vary depending on the underlying system, and diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index e94e6cdac8..67ce6cef2d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix -// +build aix // Aix system calls. // This file is compiled as ordinary Go code, @@ -107,7 +106,8 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { if n > 0 { sl += _Socklen(n) + 1 } - if sa.raw.Path[0] == '@' { + if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) { + // Check sl > 3 so we don't change unnamed socket behavior. sa.raw.Path[0] = 0 // Don't count trailing NUL for abstract address. 
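GetsockoptString now runs the kernel-filled buffer through ByteSliceToString, which truncates at the first NUL instead of assuming the reported length always ends in exactly one trailing NUL. The helper behaves like this (the byte slices are made-up examples):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// A NUL-padded buffer, as a kernel might return for a string option.
	buf := []byte{'e', 't', 'h', '0', 0, 0, 0, 0}
	fmt.Printf("%q\n", unix.ByteSliceToString(buf)) // "eth0"

	// A buffer with no NUL at all is returned in full rather than
	// having its last byte sliced off.
	fmt.Printf("%q\n", unix.ByteSliceToString([]byte("abc"))) // "abc"
}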
sl-- diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go index f2871fa953..1fdaa47600 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix && ppc -// +build aix,ppc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go index 75718ec0f1..c87f9a9f45 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix && ppc64 -// +build aix,ppc64 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index 4217de518b..a00c3e5450 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin || dragonfly || freebsd || netbsd || openbsd -// +build darwin dragonfly freebsd netbsd openbsd // BSD system call wrappers shared by *BSD based systems // including OS X (Darwin) and FreeBSD. Like the other @@ -317,7 +316,7 @@ func GetsockoptString(fd, level, opt int) (string, error) { if err != nil { return "", err } - return string(buf[:vallen-1]), nil + return ByteSliceToString(buf[:vallen]), nil } //sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go index b37310ce9b..0eaecf5fc3 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && darwin -// +build amd64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go index d51ec99630..f36c6707cf 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm64 && darwin -// +build arm64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go index 53c96641f8..16dc699379 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin && go1.12 -// +build darwin,go1.12 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go index 4e2d32120a..14bab6b2de 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build amd64 && dragonfly -// +build amd64,dragonfly package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go index b8da510043..3967bca772 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build 386 && freebsd -// +build 386,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go index 47155c4839..eff19ada23 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && freebsd -// +build amd64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go index 08932093fa..4f24b517a6 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm && freebsd -// +build arm,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go index d151a0d0e5..ac30759ece 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm64 && freebsd -// +build arm64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go index d5cd64b378..aab725ca77 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build riscv64 && freebsd -// +build riscv64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd.go b/vendor/golang.org/x/sys/unix/syscall_hurd.go index 381fd4673b..ba46651f8e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_hurd.go +++ b/vendor/golang.org/x/sys/unix/syscall_hurd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build hurd -// +build hurd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd_386.go b/vendor/golang.org/x/sys/unix/syscall_hurd_386.go index 7cf54a3e4f..df89f9e6b4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_hurd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_hurd_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build 386 && hurd -// +build 386,hurd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_illumos.go b/vendor/golang.org/x/sys/unix/syscall_illumos.go index 87db5a6a8c..a863f7052c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_illumos.go +++ b/vendor/golang.org/x/sys/unix/syscall_illumos.go @@ -5,7 +5,6 @@ // illumos system calls not present on Solaris. 
//go:build amd64 && illumos -// +build amd64,illumos package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index fb4e50224c..0f85e29e62 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -61,15 +61,23 @@ func FanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname string) ( } //sys fchmodat(dirfd int, path string, mode uint32) (err error) - -func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { - // Linux fchmodat doesn't support the flags parameter. Mimick glibc's behavior - // and check the flags. Otherwise the mode would be applied to the symlink - // destination which is not what the user expects. - if flags&^AT_SYMLINK_NOFOLLOW != 0 { - return EINVAL - } else if flags&AT_SYMLINK_NOFOLLOW != 0 { - return EOPNOTSUPP +//sys fchmodat2(dirfd int, path string, mode uint32, flags int) (err error) + +func Fchmodat(dirfd int, path string, mode uint32, flags int) error { + // Linux fchmodat doesn't support the flags parameter, but fchmodat2 does. + // Try fchmodat2 if flags are specified. + if flags != 0 { + err := fchmodat2(dirfd, path, mode, flags) + if err == ENOSYS { + // fchmodat2 isn't available. If the flags are known to be valid, + // return EOPNOTSUPP to indicate that fchmodat doesn't support them. + if flags&^(AT_SYMLINK_NOFOLLOW|AT_EMPTY_PATH) != 0 { + return EINVAL + } else if flags&(AT_SYMLINK_NOFOLLOW|AT_EMPTY_PATH) != 0 { + return EOPNOTSUPP + } + } + return err } return fchmodat(dirfd, path, mode) } @@ -417,7 +425,8 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { if n > 0 { sl += _Socklen(n) + 1 } - if sa.raw.Path[0] == '@' { + if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) { + // Check sl > 3 so we don't change unnamed socket behavior. sa.raw.Path[0] = 0 // Don't count trailing NUL for abstract address. sl-- @@ -1301,7 +1310,7 @@ func GetsockoptString(fd, level, opt int) (string, error) { return "", err } } - return string(buf[:vallen-1]), nil + return ByteSliceToString(buf[:vallen]), nil } func GetsockoptTpacketStats(fd, level, opt int) (*TpacketStats, error) { @@ -2482,3 +2491,5 @@ func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) { } return attr, nil } + +//sys Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go index c7d9945ea1..506dafa7b4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build 386 && linux -// +build 386,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go b/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go index 08086ac6a4..38d55641b5 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. 
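With the fchmodat2 path above, flags such as AT_SYMLINK_NOFOLLOW are first attempted via fchmodat2(2); on kernels that lack the syscall the wrapper falls back to the old EINVAL/EOPNOTSUPP behaviour rather than silently chmod-ing a symlink's target. A minimal sketch, with the path purely illustrative:

//go:build linux

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// Try to change the mode of the link itself, not its target.
	err := unix.Fchmodat(unix.AT_FDCWD, "/tmp/some-symlink", 0o644, unix.AT_SYMLINK_NOFOLLOW)
	switch {
	case err == unix.EOPNOTSUPP:
		log.Println("kernel lacks fchmodat2; cannot chmod the link itself")
	case err != nil:
		log.Fatalf("fchmodat: %v", err)
	}
}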
//go:build linux && (386 || amd64 || mips || mipsle || mips64 || mipsle || ppc64 || ppc64le || ppc || s390x || sparc64) -// +build linux -// +build 386 amd64 mips mipsle mips64 mipsle ppc64 ppc64le ppc s390x sparc64 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index 70601ce369..d557cf8de3 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && linux -// +build amd64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go index 8b0f0f3aa5..facdb83b23 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && linux && gc -// +build amd64,linux,gc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index da2986415a..cd2dd797fd 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm && linux -// +build arm,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index f5266689af..cf2ee6c75e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm64 && linux -// +build arm64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc.go index 2b1168d7d1..ffc4c2b635 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && gc -// +build linux,gc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go index 9843fb4896..9ebfdcf447 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && gc && 386 -// +build linux,gc,386 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go index a6008fccd5..5f2b57c4c2 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm && gc && linux -// +build arm,gc,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go index 7740af2428..d1a3ad8263 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build linux && gccgo && 386 -// +build linux,gccgo,386 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go index e16a12299a..f2f67423e9 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && gccgo && arm -// +build linux,gccgo,arm package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go index f6ab02ec15..3d0e98451f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build loong64 && linux -// +build loong64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go index 93fe59d25d..70963a95ab 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (mips64 || mips64le) -// +build linux -// +build mips64 mips64le package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go index aae7f0ffd3..c218ebd280 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (mips || mipsle) -// +build linux -// +build mips mipsle package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go index 66eff19a32..e6c48500ca 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && ppc -// +build linux,ppc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go index 806aa2574d..7286a9aa88 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (ppc64 || ppc64le) -// +build linux -// +build ppc64 ppc64le package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index 5e6ceee129..6f5a288944 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build riscv64 && linux -// +build riscv64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go index 2f89e8f5de..66f31210d0 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build s390x && linux -// +build s390x,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go index 7ca064ae76..11d1f16986 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build sparc64 && linux -// +build sparc64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go index 5199d282fd..7a5eb57432 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build 386 && netbsd -// +build 386,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go index 70a9c52e98..62d8957ae6 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && netbsd -// +build amd64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go index 3eb5942f93..ce6a068851 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm && netbsd -// +build arm,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go index fc6ccfd810..d46d689d1b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm64 && netbsd -// +build arm64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 6f34479b59..b25343c71a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -137,18 +137,13 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e } func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { - var _p0 unsafe.Pointer + var bufptr *Statfs_t var bufsize uintptr if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) + bufptr = &buf[0] bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf)) } - r0, _, e1 := Syscall(SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = e1 - } - return + return getfsstat(bufptr, bufsize, flags) } //sysnb getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) @@ -171,6 +166,20 @@ func Getresgid() (rgid, egid, sgid int) { //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL +//sys fcntl(fd int, cmd int, arg int) (n int, err error) +//sys fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) = SYS_FCNTL + +// FcntlInt performs a fcntl syscall on fd with the provided command and argument. +func FcntlInt(fd uintptr, cmd, arg int) (int, error) { + return fcntl(int(fd), cmd, arg) +} + +// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command. 
+func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error { + _, err := fcntlPtr(int(fd), cmd, unsafe.Pointer(lk)) + return err +} + //sys ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) func Ppoll(fds []PollFd, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { @@ -326,4 +335,7 @@ func Uname(uname *Utsname) error { //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys munmap(addr uintptr, length uintptr) (err error) +//sys getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) //sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) +//sys pledge(promises *byte, execpromises *byte) (err error) +//sys unveil(path *byte, flags *byte) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go index 6baabcdcb0..9ddc89f4fc 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build 386 && openbsd -// +build 386,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go index bab25360ea..70a3c96eea 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && openbsd -// +build amd64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go index 8eed3c4d4e..265caa87f7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm && openbsd -// +build arm,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go index 483dde99d4..ac4fda1715 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm64 && openbsd -// +build arm64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go index 04aa43f41b..0a451e6dd4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build openbsd -// +build openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go index c2796139c0..30a308cbb4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build ppc64 && openbsd -// +build ppc64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go index 23199a7ff6..ea954330fa 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build riscv64 && openbsd -// +build riscv64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index b99cfa1342..21974af064 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -128,7 +128,8 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { if n > 0 { sl += _Socklen(n) + 1 } - if sa.raw.Path[0] == '@' { + if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) { + // Check sl > 3 so we don't change unnamed socket behavior. sa.raw.Path[0] = 0 // Don't count trailing NUL for abstract address. sl-- @@ -157,7 +158,7 @@ func GetsockoptString(fd, level, opt int) (string, error) { if err != nil { return "", err } - return string(buf[:vallen-1]), nil + return ByteSliceToString(buf[:vallen]), nil } const ImplementsGetwd = true diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go index 0bd25ef81f..e02d8ceae3 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && solaris -// +build amd64,solaris package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index f6eda27050..77081de8c7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_unix_gc.go b/vendor/golang.org/x/sys/unix/syscall_unix_gc.go index b6919ca580..05c95bccfa 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix_gc.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin || dragonfly || freebsd || (linux && !ppc64 && !ppc64le) || netbsd || openbsd || solaris) && gc -// +build darwin dragonfly freebsd linux,!ppc64,!ppc64le netbsd openbsd solaris -// +build gc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go index f6f707acf2..23f39b7af7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (ppc64le || ppc64) && gc -// +build linux -// +build ppc64le ppc64 -// +build gc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index 4596d041ce..b473038c61 100644 --- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build zos && s390x -// +build zos,s390x package unix @@ -1105,7 +1104,7 @@ func GetsockoptString(fd, level, opt int) (string, error) { return "", err } - return string(buf[:vallen-1]), nil + return ByteSliceToString(buf[:vallen]), nil } func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { diff --git a/vendor/golang.org/x/sys/unix/sysvshm_linux.go b/vendor/golang.org/x/sys/unix/sysvshm_linux.go index 2c3a4437f0..4fcd38de27 100644 --- a/vendor/golang.org/x/sys/unix/sysvshm_linux.go +++ b/vendor/golang.org/x/sys/unix/sysvshm_linux.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux -// +build linux package unix diff --git a/vendor/golang.org/x/sys/unix/sysvshm_unix.go b/vendor/golang.org/x/sys/unix/sysvshm_unix.go index 5bb41d17bc..79a84f18b4 100644 --- a/vendor/golang.org/x/sys/unix/sysvshm_unix.go +++ b/vendor/golang.org/x/sys/unix/sysvshm_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin && !ios) || linux -// +build darwin,!ios linux package unix diff --git a/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go b/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go index 71bddefdb8..9eb0db664c 100644 --- a/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go +++ b/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin && !ios -// +build darwin,!ios package unix diff --git a/vendor/golang.org/x/sys/unix/timestruct.go b/vendor/golang.org/x/sys/unix/timestruct.go index 616b1b2848..7997b19022 100644 --- a/vendor/golang.org/x/sys/unix/timestruct.go +++ b/vendor/golang.org/x/sys/unix/timestruct.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/unveil_openbsd.go b/vendor/golang.org/x/sys/unix/unveil_openbsd.go index 168d5ae779..cb7e598cef 100644 --- a/vendor/golang.org/x/sys/unix/unveil_openbsd.go +++ b/vendor/golang.org/x/sys/unix/unveil_openbsd.go @@ -4,39 +4,48 @@ package unix -import ( - "syscall" - "unsafe" -) +import "fmt" // Unveil implements the unveil syscall. // For more information see unveil(2). // Note that the special case of blocking further // unveil calls is handled by UnveilBlock. func Unveil(path string, flags string) error { - pathPtr, err := syscall.BytePtrFromString(path) - if err != nil { + if err := supportsUnveil(); err != nil { return err } - flagsPtr, err := syscall.BytePtrFromString(flags) + pathPtr, err := BytePtrFromString(path) if err != nil { return err } - _, _, e := syscall.Syscall(SYS_UNVEIL, uintptr(unsafe.Pointer(pathPtr)), uintptr(unsafe.Pointer(flagsPtr)), 0) - if e != 0 { - return e + flagsPtr, err := BytePtrFromString(flags) + if err != nil { + return err } - return nil + return unveil(pathPtr, flagsPtr) } // UnveilBlock blocks future unveil calls. // For more information see unveil(2). func UnveilBlock() error { - // Both pointers must be nil. - var pathUnsafe, flagsUnsafe unsafe.Pointer - _, _, e := syscall.Syscall(SYS_UNVEIL, uintptr(pathUnsafe), uintptr(flagsUnsafe), 0) - if e != 0 { - return e + if err := supportsUnveil(); err != nil { + return err } + return unveil(nil, nil) +} + +// supportsUnveil checks for availability of the unveil(2) system call based +// on the running OpenBSD version. 
+func supportsUnveil() error { + maj, min, err := majmin() + if err != nil { + return err + } + + // unveil is not available before 6.4 + if maj < 6 || (maj == 6 && min <= 3) { + return fmt.Errorf("cannot call Unveil on OpenBSD %d.%d", maj, min) + } + return nil } diff --git a/vendor/golang.org/x/sys/unix/xattr_bsd.go b/vendor/golang.org/x/sys/unix/xattr_bsd.go index f5f8e9f366..e168793961 100644 --- a/vendor/golang.org/x/sys/unix/xattr_bsd.go +++ b/vendor/golang.org/x/sys/unix/xattr_bsd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build freebsd || netbsd -// +build freebsd netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go index ca9799b79e..2fb219d787 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && aix -// +build ppc,aix // Created by cgo -godefs - DO NOT EDIT // cgo -godefs -- -maix32 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go index 200c8c26fe..b0e6f5c85c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && aix -// +build ppc64,aix // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -maix64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index 1430076271..e40fa85245 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && darwin -// +build amd64,darwin // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index ab044a7427..bb02aa6c05 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && darwin -// +build arm64,darwin // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go index 17bba0e44f..c0e0f8694c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && dragonfly -// +build amd64,dragonfly // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go index f8c2c51387..6c6923906f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && freebsd -// +build 386,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -m32 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go index 96310c3be1..dd9163f8e8 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && freebsd -// +build amd64,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go index 777b69defa..493a2a793c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && freebsd -// +build arm,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go index c557ac2db3..8b437b307d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && freebsd -// +build arm64,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go index 341b4d9626..67c02dd579 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && freebsd -// +build riscv64,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index f9c7f479b0..c73cfe2f10 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1,7 +1,6 @@ // Code generated by mkmerge; DO NOT EDIT. 
//go:build linux -// +build linux package unix @@ -481,10 +480,13 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_AFTER = 0x10 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 - BPF_F_KPROBE_MULTI_RETURN = 0x1 + BPF_F_BEFORE = 0x8 + BPF_F_ID = 0x20 + BPF_F_NETFILTER_IP_DEFRAG = 0x1 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_REPLACE = 0x4 BPF_F_SLEEPABLE = 0x10 @@ -521,6 +523,7 @@ const ( BPF_MAJOR_VERSION = 0x1 BPF_MAXINSNS = 0x1000 BPF_MEM = 0x60 + BPF_MEMSX = 0x80 BPF_MEMWORDS = 0x10 BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 @@ -776,6 +779,8 @@ const ( DEVLINK_GENL_MCGRP_CONFIG_NAME = "config" DEVLINK_GENL_NAME = "devlink" DEVLINK_GENL_VERSION = 0x1 + DEVLINK_PORT_FN_CAP_IPSEC_CRYPTO = 0x4 + DEVLINK_PORT_FN_CAP_IPSEC_PACKET = 0x8 DEVLINK_PORT_FN_CAP_MIGRATABLE = 0x2 DEVLINK_PORT_FN_CAP_ROCE = 0x1 DEVLINK_SB_THRESHOLD_TO_ALPHA_MAX = 0x14 @@ -1698,6 +1703,7 @@ const ( KEXEC_ON_CRASH = 0x1 KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 + KEXEC_UPDATE_ELFCOREHDR = 0x4 KEYCTL_ASSUME_AUTHORITY = 0x10 KEYCTL_CAPABILITIES = 0x1f KEYCTL_CAPS0_BIG_KEY = 0x10 @@ -1795,6 +1801,7 @@ const ( LOCK_SH = 0x1 LOCK_UN = 0x8 LOOP_CLR_FD = 0x4c01 + LOOP_CONFIGURE = 0x4c0a LOOP_CTL_ADD = 0x4c80 LOOP_CTL_GET_FREE = 0x4c82 LOOP_CTL_REMOVE = 0x4c81 @@ -2275,6 +2282,7 @@ const ( PERF_MEM_LVLNUM_PMEM = 0xe PERF_MEM_LVLNUM_RAM = 0xd PERF_MEM_LVLNUM_SHIFT = 0x21 + PERF_MEM_LVLNUM_UNC = 0x8 PERF_MEM_LVL_HIT = 0x2 PERF_MEM_LVL_IO = 0x1000 PERF_MEM_LVL_L1 = 0x8 @@ -3461,6 +3469,7 @@ const ( XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 + XDP_PKT_CONTD = 0x1 XDP_RING_NEED_WAKEUP = 0x1 XDP_RX_RING = 0x2 XDP_SHARED_UMEM = 0x1 @@ -3473,6 +3482,7 @@ const ( XDP_UMEM_REG = 0x4 XDP_UMEM_UNALIGNED_CHUNK_FLAG = 0x1 XDP_USE_NEED_WAKEUP = 0x8 + XDP_USE_SG = 0x10 XDP_ZEROCOPY = 0x4 XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 30aee00a53..4920821cf3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && linux -// +build 386,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/386/include -m32 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 8ebfa51278..a0c1e41127 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && linux -// +build amd64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/amd64/include -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 271a21cdc7..c63985560f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && linux -// +build arm,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/arm/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 910c330a39..47cc62e25c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && linux -// +build arm64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/arm64/include -fsigned-char _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index a640798c93..27ac4a09e2 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build loong64 && linux -// +build loong64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/loong64/include _const.go @@ -119,6 +118,7 @@ const ( IXOFF = 0x1000 IXON = 0x400 LASX_CTX_MAGIC = 0x41535801 + LBT_CTX_MAGIC = 0x42540001 LSX_CTX_MAGIC = 0x53580001 MAP_ANON = 0x20 MAP_ANONYMOUS = 0x20 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 0d5925d340..54694642a5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips && linux -// +build mips,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/mips/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index d72a00e0b6..3adb81d758 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && linux -// +build mips64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/mips64/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 02ba129f85..2dfe98f0d1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64le && linux -// +build mips64le,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/mips64le/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 8daa6dd968..f5398f84f0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mipsle && linux -// +build mipsle,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/mipsle/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 63c8fa2f7f..c54f152d68 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && linux -// +build ppc,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/ppc/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 930799ec1b..76057dc72f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && linux -// +build ppc64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/ppc64/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 8605a7dd7e..e0c3725e2b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64le && linux -// +build ppc64le,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/ppc64le/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 95a016f1c0..18f2813ed5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && linux -// +build riscv64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/riscv64/include _const.go @@ -228,6 +227,9 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTRACE_GETFDPIC = 0x21 + PTRACE_GETFDPIC_EXEC = 0x0 + PTRACE_GETFDPIC_INTERP = 0x1 RLIMIT_AS = 0x9 RLIMIT_MEMLOCK = 0x8 RLIMIT_NOFILE = 0x7 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 1ae0108f57..11619d4ec8 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build s390x && linux -// +build s390x,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/s390x/include -fsigned-char _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 1bb7c6333b..396d994da7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build sparc64 && linux -// +build sparc64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/sparc64/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go index 72f7420d20..130085df40 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && netbsd -// +build 386,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m32 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go index 8d4eb0c080..84769a1a38 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && netbsd -// +build amd64,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go index 9eef9749f6..602ded0033 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && netbsd -// +build arm,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -marm _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go index 3b62ba192c..efc0406ee1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && netbsd -// +build arm64,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go index af20e474b3..5a6500f837 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && openbsd -// +build 386,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m32 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go index 6015fcb2bf..a5aeeb979d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && openbsd -// +build amd64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go index 8d44955e44..0e9748a722 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && openbsd -// +build arm,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go index ae16fe7542..4f4449abc1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && openbsd -// +build arm64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go index 03d90fe355..76a363f0fe 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && openbsd -// +build mips64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go index 8e2c51b1ee..43ca0cdfdc 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && openbsd -// +build ppc64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go index 13d403031e..b1b8bb2005 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && openbsd -// +build riscv64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go index 1afee6a089..d2ddd3176e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && solaris -// +build amd64,solaris // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go index fc7d0506f6..4dfd2e051d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x // Hand edited based on zerrors_linux_s390x.go // TODO: auto-generate. diff --git a/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go b/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go index 97f20ca282..586317c78e 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go @@ -1,8 +1,6 @@ // Code generated by linux/mkall.go generatePtracePair("arm", "arm64"). DO NOT EDIT. 
//go:build linux && (arm || arm64) -// +build linux -// +build arm arm64 package unix diff --git a/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go b/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go index 0b5f794305..d7c881be77 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go @@ -1,8 +1,6 @@ // Code generated by linux/mkall.go generatePtracePair("mips", "mips64"). DO NOT EDIT. //go:build linux && (mips || mips64) -// +build linux -// +build mips mips64 package unix diff --git a/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go b/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go index 2807f7e646..2d2de5d292 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go @@ -1,8 +1,6 @@ // Code generated by linux/mkall.go generatePtracePair("mipsle", "mips64le"). DO NOT EDIT. //go:build linux && (mipsle || mips64le) -// +build linux -// +build mipsle mips64le package unix diff --git a/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go b/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go index 281ea64e34..5adc79fb5e 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go @@ -1,8 +1,6 @@ // Code generated by linux/mkall.go generatePtracePair("386", "amd64"). DO NOT EDIT. //go:build linux && (386 || amd64) -// +build linux -// +build 386 amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go index d1d1d23311..6ea64a3c0c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build aix && ppc -// +build aix,ppc package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go index f99a18adc3..99ee4399a3 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build aix && ppc64 -// +build aix,ppc64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go index c4d50ae500..b68a78362b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build aix && ppc64 && gc -// +build aix,ppc64,gc package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go index 6903d3b09e..0a87450bf8 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build aix && ppc64 && gccgo -// +build aix,ppc64,gccgo package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index 1cad561e98..ccb02f240a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build darwin && amd64 -// +build darwin,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index b18edbd0e3..1b40b997b5 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build darwin && arm64 -// +build darwin,arm64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index 0c67df64a5..aad65fc793 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build dragonfly && amd64 -// +build dragonfly,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index e6e05d145b..c0096391af 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build freebsd && 386 -// +build freebsd,386 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index 7508accac9..7664df7496 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build freebsd && amd64 -// +build freebsd,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index 7b56aead46..ae099182c9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build freebsd && arm -// +build freebsd,arm package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go index cc623dcaae..11fd5d45bb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build freebsd && arm64 -// +build freebsd,arm64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go index 5818491974..c3d2d65307 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build freebsd && riscv64 -// +build freebsd,riscv64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go index 6be25cd190..c698cbc01a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build illumos && amd64 -// +build illumos,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 1ff3aec74c..1488d27128 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -1,7 +1,6 @@ // Code generated by mkmerge; DO NOT EDIT. //go:build linux -// +build linux package unix @@ -38,6 +37,21 @@ func fchmodat(dirfd int, path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fchmodat2(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT2, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -2195,3 +2209,13 @@ func schedGetattr(pid int, attr *SchedAttr, size uint, flags uint) (err error) { } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error) { + _, _, e1 := Syscall6(SYS_CACHESTAT, uintptr(fd), uintptr(unsafe.Pointer(crange)), uintptr(unsafe.Pointer(cstat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index 07b549cc25..4def3e9fcb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && 386 -// +build linux,386 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index 5f481bf83f..fef2bc8ba9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && amd64 -// +build linux,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index 824cd52c7f..a9fd76a884 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && arm -// +build linux,arm package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index e77aecfe98..4600650280 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build linux && arm64 -// +build linux,arm64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go index 806ffd1e12..c8987d2646 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && loong64 -// +build linux,loong64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index 961a3afb7b..921f430611 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && mips -// +build linux,mips package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index ed05005e91..44f067829c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && mips64 -// +build linux,mips64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index d365b718f3..e7fa0abf0d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && mips64le -// +build linux,mips64le package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index c3f1b8bbde..8c5125675e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && mipsle -// +build linux,mipsle package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go index a6574cf98b..7392fd45e4 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && ppc -// +build linux,ppc package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index f40990264f..41180434e6 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && ppc64 -// +build linux,ppc64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index 9dfcc29974..40c6ce7ae5 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build linux && ppc64le -// +build linux,ppc64le package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go index 0ab4f2ed72..2cfe34adb1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && riscv64 -// +build linux,riscv64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index 6cde32237d..61e6f07097 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && s390x -// +build linux,s390x package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go index 5253d65bf1..834b842042 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && sparc64 -// +build linux,sparc64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index 2df3c5bac6..e91ebc14a1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build netbsd && 386 -// +build netbsd,386 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index a60556babb..be28babbcd 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build netbsd && amd64 -// +build netbsd,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index 9f788917a4..fb587e8261 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build netbsd && arm -// +build netbsd,arm package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go index 82a4cb2dc4..d576438bb0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build netbsd && arm64 -// +build netbsd,arm64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 66b3b64563..a1d061597c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && 386 -// +build openbsd,386 package unix @@ -585,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -2213,6 +2238,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2269,33 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" + + diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s index 3dcacd30d7..41b5617316 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s @@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $4 DATA ·libc_sysctl_trampoline_addr(SB)/4, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fcntl_trampoline_addr(SB)/4, 
$libc_fcntl_trampoline<>(SB) + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $4 @@ -668,7 +673,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $4 DATA ·libc_munmap_trampoline_addr(SB)/4, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getfsstat_trampoline_addr(SB)/4, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $4 DATA ·libc_utimensat_trampoline_addr(SB)/4, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pledge_trampoline_addr(SB)/4, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $4 +DATA ·libc_unveil_trampoline_addr(SB)/4, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index c5c4cc112e..5b2a740977 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build openbsd && amd64 -// +build openbsd,amd64 package unix @@ -585,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -2213,6 +2238,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2269,33 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr 
//go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" + + diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s index 2763620b01..4019a656f6 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s @@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 @@ -668,7 +673,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index 93bfbb3287..f6eda1344a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && arm -// +build openbsd,arm package unix @@ -585,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -2213,6 +2238,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2269,33 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" + + diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s index c922314048..ac4af24f90 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s @@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $4 DATA ·libc_sysctl_trampoline_addr(SB)/4, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fcntl_trampoline_addr(SB)/4, 
$libc_fcntl_trampoline<>(SB) + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $4 @@ -668,7 +673,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $4 DATA ·libc_munmap_trampoline_addr(SB)/4, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getfsstat_trampoline_addr(SB)/4, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $4 DATA ·libc_utimensat_trampoline_addr(SB)/4, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pledge_trampoline_addr(SB)/4, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $4 +DATA ·libc_unveil_trampoline_addr(SB)/4, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index a107b8fda5..55df20ae9d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build openbsd && arm64 -// +build openbsd,arm64 package unix @@ -585,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -2213,6 +2238,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2269,33 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr 
//go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" + + diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s index a6bc32c922..f77d532121 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s @@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 @@ -668,7 +673,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index c427de509e..8c1155cbc0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && mips64 -// +build openbsd,mips64 package unix @@ -585,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -2213,6 +2238,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2269,33 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" + + diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s index b4e7bceabf..fae140b62c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s @@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fcntl_trampoline_addr(SB)/8, 
$libc_fcntl_trampoline<>(SB) + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 @@ -668,7 +673,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go index 60c1a99ae4..7cc80c58d9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build openbsd && ppc64 -// +build openbsd,ppc64 package unix @@ -585,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -2213,6 +2238,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2269,33 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr 
//go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" + + diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s index ca3f766009..9d1e0ff06d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s @@ -213,6 +213,12 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_fcntl(SB) + RET +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 CALL libc_ppoll(SB) RET @@ -801,8 +807,26 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getfsstat(SB) + RET +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 CALL libc_utimensat(SB) RET GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_pledge(SB) + RET +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_unveil(SB) + RET +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go index 52eba360f8..0688737f49 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && riscv64 -// +build openbsd,riscv64 package unix @@ -585,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -2213,6 +2238,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2269,33 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" + + diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s index 477a7d5b21..da115f9a4b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s @@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 +DATA 
·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 @@ -668,7 +673,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index b401894644..829b87feb8 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build solaris && amd64 -// +build solaris,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go index 1d8fe1d4b2..94f0112383 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build zos && s390x -// +build zos,s390x package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go index 55e0484719..3a58ae819a 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build 386 && openbsd -// +build 386,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go index d2243cf83f..dcb7a0eb72 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build amd64 && openbsd -// +build amd64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go index 82dc51bd8b..db5a7bf13c 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. 
//go:build arm && openbsd -// +build arm,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go index cbdda1a4ae..7be575a777 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build arm64 && openbsd -// +build arm64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go index f55eae1a82..d6e3174c69 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build mips64 && openbsd -// +build mips64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go index e44054470b..ee97157d01 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build ppc64 && openbsd -// +build ppc64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go index a0db82fce2..35c3b91d0f 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build riscv64 && openbsd -// +build riscv64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go index f8298ff9b5..5edda76870 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && darwin -// +build amd64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go index 5eb433bbf0..0dc9e8b4d9 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && darwin -// +build arm64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go index 703675c0c4..308ddf3a1f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && dragonfly -// +build amd64,dragonfly package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go index 4e0d96107b..418664e3dc 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build 386 && freebsd -// +build 386,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go index 01636b838d..34d0b86d7c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && freebsd -// +build amd64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go index ad99bc106a..b71cf45e2e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && freebsd -// +build arm,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go index 89dcc42747..e32df1c1ee 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && freebsd -// +build arm64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go index ee37aaa0c9..15ad6111f3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && freebsd -// +build riscv64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 9862853d34..fcf3ecbdde 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && linux -// +build 386,linux package unix @@ -448,4 +447,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 8901f0f4e5..f56dc2504a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && linux -// +build amd64,linux package unix @@ -370,4 +369,6 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 6902c37eed..974bf24676 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build arm && linux -// +build arm,linux package unix @@ -412,4 +411,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index a6d3dff811..39a2739e23 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && linux -// +build arm64,linux package unix @@ -315,4 +314,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index b18f3f7107..cf9c9d77e1 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build loong64 && linux -// +build loong64,linux package unix @@ -309,4 +308,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 0302e5e3de..10b7362ef4 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips && linux -// +build mips,linux package unix @@ -432,4 +431,5 @@ const ( SYS_FUTEX_WAITV = 4449 SYS_SET_MEMPOLICY_HOME_NODE = 4450 SYS_CACHESTAT = 4451 + SYS_FCHMODAT2 = 4452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 6693ba4a0f..cd4d8b4fd3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && linux -// +build mips64,linux package unix @@ -362,4 +361,5 @@ const ( SYS_FUTEX_WAITV = 5449 SYS_SET_MEMPOLICY_HOME_NODE = 5450 SYS_CACHESTAT = 5451 + SYS_FCHMODAT2 = 5452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index fd93f4987c..2c0efca818 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64le && linux -// +build mips64le,linux package unix @@ -362,4 +361,5 @@ const ( SYS_FUTEX_WAITV = 5449 SYS_SET_MEMPOLICY_HOME_NODE = 5450 SYS_CACHESTAT = 5451 + SYS_FCHMODAT2 = 5452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 760ddcadc2..a72e31d391 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build mipsle && linux -// +build mipsle,linux package unix @@ -432,4 +431,5 @@ const ( SYS_FUTEX_WAITV = 4449 SYS_SET_MEMPOLICY_HOME_NODE = 4450 SYS_CACHESTAT = 4451 + SYS_FCHMODAT2 = 4452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index cff2b2555b..c7d1e37471 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && linux -// +build ppc,linux package unix @@ -439,4 +438,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index a4b2405d09..f4d4838c87 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && linux -// +build ppc64,linux package unix @@ -411,4 +410,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index aca54b4e3a..b64f0e5911 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64le && linux -// +build ppc64le,linux package unix @@ -411,4 +410,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 9d1738d641..95711195a0 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && linux -// +build riscv64,linux package unix @@ -316,4 +315,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 022878dc8d..f94e943bc4 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build s390x && linux -// +build s390x,linux package unix @@ -377,4 +376,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 4100a761c2..ba0c2bc515 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build sparc64 && linux -// +build sparc64,linux package unix @@ -390,4 +389,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go index 3a6699eba9..b2aa8cd495 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && netbsd -// +build 386,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go index 5677cd4f15..524a1b1c9a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && netbsd -// +build amd64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go index e784cb6db1..d59b943ac2 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && netbsd -// +build arm,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go index bd4952efa5..31e771d53e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build arm64 && netbsd -// +build arm64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go index 597733813e..9fd77c6cb4 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && openbsd -// +build 386,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go index 16af291899..af10af28cb 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && openbsd -// +build amd64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go index f59b18a977..cc2028af4b 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && openbsd -// +build arm,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go index 721ef59103..c06dd4415a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build arm64 && openbsd -// +build arm64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go index 01c43a01fd..9ddbf3e08f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && openbsd -// +build mips64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go index f258cfa24e..19a6ee4134 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && openbsd -// +build ppc64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go index 07919e0ecc..05192a782d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && openbsd -// +build riscv64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go index 073daad43b..b2e3085819 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go index 7a8161c1d1..3e6d57cae7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && aix -// +build ppc,aix package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go index 07ed733c51..3a219bdce7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && aix -// +build ppc64,aix package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 690cefc3d0..091d107f3a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && darwin -// +build amd64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 5bffc10eac..28ff4ef74d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build arm64 && darwin -// +build arm64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go index d0ba8e9b86..30e405bb4c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && dragonfly -// +build amd64,dragonfly package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 29dc483378..6cbd094a3a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && freebsd -// +build 386,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index 0a89b28906..7c03b6ee77 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && freebsd -// +build amd64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index c8666bb152..422107ee8b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && freebsd -// +build arm,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 88fb48a887..505a12acfd 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && freebsd -// +build arm64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go index 698dc975e9..cc986c7900 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && freebsd -// +build riscv64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 18aa70b426..bbf8399ff5 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -1,7 +1,6 @@ // Code generated by mkmerge; DO NOT EDIT. 
//go:build linux -// +build linux package unix @@ -2672,6 +2671,7 @@ const ( BPF_PROG_TYPE_LSM = 0x1d BPF_PROG_TYPE_SK_LOOKUP = 0x1e BPF_PROG_TYPE_SYSCALL = 0x1f + BPF_PROG_TYPE_NETFILTER = 0x20 BPF_CGROUP_INET_INGRESS = 0x0 BPF_CGROUP_INET_EGRESS = 0x1 BPF_CGROUP_INET_SOCK_CREATE = 0x2 @@ -2716,6 +2716,11 @@ const ( BPF_PERF_EVENT = 0x29 BPF_TRACE_KPROBE_MULTI = 0x2a BPF_LSM_CGROUP = 0x2b + BPF_STRUCT_OPS = 0x2c + BPF_NETFILTER = 0x2d + BPF_TCX_INGRESS = 0x2e + BPF_TCX_EGRESS = 0x2f + BPF_TRACE_UPROBE_MULTI = 0x30 BPF_LINK_TYPE_UNSPEC = 0x0 BPF_LINK_TYPE_RAW_TRACEPOINT = 0x1 BPF_LINK_TYPE_TRACING = 0x2 @@ -2726,6 +2731,18 @@ const ( BPF_LINK_TYPE_PERF_EVENT = 0x7 BPF_LINK_TYPE_KPROBE_MULTI = 0x8 BPF_LINK_TYPE_STRUCT_OPS = 0x9 + BPF_LINK_TYPE_NETFILTER = 0xa + BPF_LINK_TYPE_TCX = 0xb + BPF_LINK_TYPE_UPROBE_MULTI = 0xc + BPF_PERF_EVENT_UNSPEC = 0x0 + BPF_PERF_EVENT_UPROBE = 0x1 + BPF_PERF_EVENT_URETPROBE = 0x2 + BPF_PERF_EVENT_KPROBE = 0x3 + BPF_PERF_EVENT_KRETPROBE = 0x4 + BPF_PERF_EVENT_TRACEPOINT = 0x5 + BPF_PERF_EVENT_EVENT = 0x6 + BPF_F_KPROBE_MULTI_RETURN = 0x1 + BPF_F_UPROBE_MULTI_RETURN = 0x1 BPF_ANY = 0x0 BPF_NOEXIST = 0x1 BPF_EXIST = 0x2 @@ -2743,6 +2760,8 @@ const ( BPF_F_MMAPABLE = 0x400 BPF_F_PRESERVE_ELEMS = 0x800 BPF_F_INNER_MAP = 0x1000 + BPF_F_LINK = 0x2000 + BPF_F_PATH_FD = 0x4000 BPF_STATS_RUN_TIME = 0x0 BPF_STACK_BUILD_ID_EMPTY = 0x0 BPF_STACK_BUILD_ID_VALID = 0x1 @@ -2763,6 +2782,7 @@ const ( BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_DONT_FRAGMENT = 0x4 BPF_F_SEQ_NUMBER = 0x8 + BPF_F_NO_TUNNEL_KEY = 0x10 BPF_F_TUNINFO_FLAGS = 0x10 BPF_F_INDEX_MASK = 0xffffffff BPF_F_CURRENT_CPU = 0xffffffff @@ -2779,6 +2799,8 @@ const ( BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 BPF_F_ADJ_ROOM_NO_CSUM_RESET = 0x20 BPF_F_ADJ_ROOM_ENCAP_L2_ETH = 0x40 + BPF_F_ADJ_ROOM_DECAP_L3_IPV4 = 0x80 + BPF_F_ADJ_ROOM_DECAP_L3_IPV6 = 0x100 BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_F_SYSCTL_BASE_NAME = 0x1 @@ -2867,6 +2889,8 @@ const ( BPF_DEVCG_DEV_CHAR = 0x2 BPF_FIB_LOOKUP_DIRECT = 0x1 BPF_FIB_LOOKUP_OUTPUT = 0x2 + BPF_FIB_LOOKUP_SKIP_NEIGH = 0x4 + BPF_FIB_LOOKUP_TBID = 0x8 BPF_FIB_LKUP_RET_SUCCESS = 0x0 BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 @@ -2902,6 +2926,7 @@ const ( BPF_CORE_ENUMVAL_EXISTS = 0xa BPF_CORE_ENUMVAL_VALUE = 0xb BPF_CORE_TYPE_MATCHES = 0xc + BPF_F_TIMER_ABS = 0x1 ) const ( @@ -2980,6 +3005,12 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } +type LoopConfig struct { + Fd uint32 + Size uint32 + Info LoopInfo64 + _ [8]uint64 +} type TIPCSocketAddr struct { Ref uint32 @@ -5883,3 +5914,15 @@ type SchedAttr struct { } const SizeofSchedAttr = 0x38 + +type Cachestat_t struct { + Cache uint64 + Dirty uint64 + Writeback uint64 + Evicted uint64 + Recently_evicted uint64 +} +type CachestatRange struct { + Off uint64 + Len uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 6d8acbcc57..438a30affa 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build 386 && linux -// +build 386,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 59293c6884..adceca3553 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && linux -// +build amd64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 40cfa38c29..eeaa00a37d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && linux -// +build arm,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 055bc4216d..6739aa91d4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && linux -// +build arm64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go index f28affbc60..9920ef6317 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build loong64 && linux -// +build loong64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 9d71e7ccd8..2923b799a4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips && linux -// +build mips,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index fd5ccd332a..ce2750ee41 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && linux -// +build mips64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 7704de77a2..3038811d70 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64le && linux -// +build mips64le,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index df00b87571..efc6fed18c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build mipsle && linux -// +build mipsle,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go index 0942840db6..9a654b75a9 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && linux -// +build ppc,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 0348743950..40d358e33e 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && linux -// +build ppc64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index bad0670475..148c6ceb86 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64le && linux -// +build ppc64le,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 1b4c97c32a..72ba81543e 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && linux -// +build riscv64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index aa268d025c..71e765508e 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build s390x && linux -// +build s390x,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 444045b6c5..4abbdb9de9 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build sparc64 && linux -// +build sparc64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go index 9bc4c8f9d8..f22e7947d9 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && netbsd -// +build 386,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go index bb05f655d2..066a7d83d2 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build amd64 && netbsd -// +build amd64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go index db40e3a19c..439548ec9a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && netbsd -// +build arm,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go index 11121151cc..16085d3bbc 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && netbsd -// +build arm64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go index 26eba23b72..afd13a3af7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && openbsd -// +build 386,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go index 5a54798869..5d97f1f9b6 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && openbsd -// +build amd64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go index be58c4e1ff..34871cdc15 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && openbsd -// +build arm,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go index 52338266cb..5911bceb31 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && openbsd -// +build arm64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go index 605cfdb12b..e4f24f3bc9 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && openbsd -// +build mips64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go index d6724c0102..ca50a79303 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build ppc64 && openbsd -// +build ppc64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go index ddfd27a434..d7d7f79023 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && openbsd -// +build riscv64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go index 0400747c67..14160576d2 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && solaris -// +build amd64,solaris package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go index aec1efcb30..54f31be637 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x // Hand edited based on ztypes_linux_s390x.go // TODO: auto-generate. diff --git a/vendor/golang.org/x/sys/windows/aliases.go b/vendor/golang.org/x/sys/windows/aliases.go index a20ebea633..ce2d713d62 100644 --- a/vendor/golang.org/x/sys/windows/aliases.go +++ b/vendor/golang.org/x/sys/windows/aliases.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows && go1.9 -// +build windows,go1.9 package windows diff --git a/vendor/golang.org/x/sys/windows/empty.s b/vendor/golang.org/x/sys/windows/empty.s index fdbbbcd317..ba64caca5d 100644 --- a/vendor/golang.org/x/sys/windows/empty.s +++ b/vendor/golang.org/x/sys/windows/empty.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !go1.12 -// +build !go1.12 // This file is here to allow bodyless functions with go:linkname for Go 1.11 // and earlier (see https://golang.org/issue/23311). diff --git a/vendor/golang.org/x/sys/windows/eventlog.go b/vendor/golang.org/x/sys/windows/eventlog.go index 2cd60645ee..6c366955d9 100644 --- a/vendor/golang.org/x/sys/windows/eventlog.go +++ b/vendor/golang.org/x/sys/windows/eventlog.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows -// +build windows package windows diff --git a/vendor/golang.org/x/sys/windows/mksyscall.go b/vendor/golang.org/x/sys/windows/mksyscall.go index 8563f79c57..dbcdb090c0 100644 --- a/vendor/golang.org/x/sys/windows/mksyscall.go +++ b/vendor/golang.org/x/sys/windows/mksyscall.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build generate -// +build generate package windows diff --git a/vendor/golang.org/x/sys/windows/race.go b/vendor/golang.org/x/sys/windows/race.go index 9196b089ca..0f1bdc3860 100644 --- a/vendor/golang.org/x/sys/windows/race.go +++ b/vendor/golang.org/x/sys/windows/race.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build windows && race -// +build windows,race package windows diff --git a/vendor/golang.org/x/sys/windows/race0.go b/vendor/golang.org/x/sys/windows/race0.go index 7bae4817a0..0c78da78b1 100644 --- a/vendor/golang.org/x/sys/windows/race0.go +++ b/vendor/golang.org/x/sys/windows/race0.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows && !race -// +build windows,!race package windows diff --git a/vendor/golang.org/x/sys/windows/service.go b/vendor/golang.org/x/sys/windows/service.go index c44a1b9636..a9dc6308d6 100644 --- a/vendor/golang.org/x/sys/windows/service.go +++ b/vendor/golang.org/x/sys/windows/service.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows -// +build windows package windows diff --git a/vendor/golang.org/x/sys/windows/str.go b/vendor/golang.org/x/sys/windows/str.go index 4fc01434e4..6a4f9ce6aa 100644 --- a/vendor/golang.org/x/sys/windows/str.go +++ b/vendor/golang.org/x/sys/windows/str.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows -// +build windows package windows diff --git a/vendor/golang.org/x/sys/windows/syscall.go b/vendor/golang.org/x/sys/windows/syscall.go index 8732cdb957..e85ed6b9c8 100644 --- a/vendor/golang.org/x/sys/windows/syscall.go +++ b/vendor/golang.org/x/sys/windows/syscall.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows -// +build windows // Package windows contains an interface to the low-level operating system // primitives. OS details vary depending on the underlying system, and diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 35cfc57ca8..47dc579676 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -155,6 +155,8 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) = kernel32.GetModuleFileNameW //sys GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) = kernel32.GetModuleHandleExW //sys SetDefaultDllDirectories(directoryFlags uint32) (err error) +//sys AddDllDirectory(path *uint16) (cookie uintptr, err error) = kernel32.AddDllDirectory +//sys RemoveDllDirectory(cookie uintptr) (err error) = kernel32.RemoveDllDirectory //sys SetDllDirectory(path string) (err error) = kernel32.SetDllDirectoryW //sys GetVersion() (ver uint32, err error) //sys FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) = FormatMessageW @@ -233,6 +235,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) = userenv.CreateEnvironmentBlock //sys DestroyEnvironmentBlock(block *uint16) (err error) = userenv.DestroyEnvironmentBlock //sys getTickCount64() (ms uint64) = kernel32.GetTickCount64 +//sys GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) //sys SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) //sys GetFileAttributes(name *uint16) (attrs uint32, err error) [failretval==INVALID_FILE_ATTRIBUTES] = kernel32.GetFileAttributesW //sys SetFileAttributes(name *uint16, attrs uint32) (err error) = kernel32.SetFileAttributesW @@ -969,7 +972,8 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, int32, error) { if n > 0 { sl += 
int32(n) + 1 } - if sa.raw.Path[0] == '@' { + if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) { + // Check sl > 3 so we don't change unnamed socket behavior. sa.raw.Path[0] = 0 // Don't count trailing NUL for abstract address. sl-- diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index b88dc7c85e..359780f6ac 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -1094,7 +1094,33 @@ const ( SOMAXCONN = 0x7fffffff - TCP_NODELAY = 1 + TCP_NODELAY = 1 + TCP_EXPEDITED_1122 = 2 + TCP_KEEPALIVE = 3 + TCP_MAXSEG = 4 + TCP_MAXRT = 5 + TCP_STDURG = 6 + TCP_NOURG = 7 + TCP_ATMARK = 8 + TCP_NOSYNRETRIES = 9 + TCP_TIMESTAMPS = 10 + TCP_OFFLOAD_PREFERENCE = 11 + TCP_CONGESTION_ALGORITHM = 12 + TCP_DELAY_FIN_ACK = 13 + TCP_MAXRTMS = 14 + TCP_FASTOPEN = 15 + TCP_KEEPCNT = 16 + TCP_KEEPIDLE = TCP_KEEPALIVE + TCP_KEEPINTVL = 17 + TCP_FAIL_CONNECT_ON_ICMP_ERROR = 18 + TCP_ICMP_ERROR_INFO = 19 + + UDP_NOCHECKSUM = 1 + UDP_SEND_MSG_SIZE = 2 + UDP_RECV_MAX_COALESCED_SIZE = 3 + UDP_CHECKSUM_COVERAGE = 20 + + UDP_COALESCED_INFO = 3 SHUT_RD = 0 SHUT_WR = 1 diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 8b1688de4c..146a1f0196 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -184,6 +184,7 @@ var ( procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx") procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") + procAddDllDirectory = modkernel32.NewProc("AddDllDirectory") procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") procCancelIo = modkernel32.NewProc("CancelIo") procCancelIoEx = modkernel32.NewProc("CancelIoEx") @@ -253,6 +254,7 @@ var ( procGetFileAttributesW = modkernel32.NewProc("GetFileAttributesW") procGetFileInformationByHandle = modkernel32.NewProc("GetFileInformationByHandle") procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx") + procGetFileTime = modkernel32.NewProc("GetFileTime") procGetFileType = modkernel32.NewProc("GetFileType") procGetFinalPathNameByHandleW = modkernel32.NewProc("GetFinalPathNameByHandleW") procGetFullPathNameW = modkernel32.NewProc("GetFullPathNameW") @@ -329,6 +331,7 @@ var ( procReadProcessMemory = modkernel32.NewProc("ReadProcessMemory") procReleaseMutex = modkernel32.NewProc("ReleaseMutex") procRemoveDirectoryW = modkernel32.NewProc("RemoveDirectoryW") + procRemoveDllDirectory = modkernel32.NewProc("RemoveDllDirectory") procResetEvent = modkernel32.NewProc("ResetEvent") procResizePseudoConsole = modkernel32.NewProc("ResizePseudoConsole") procResumeThread = modkernel32.NewProc("ResumeThread") @@ -1604,6 +1607,15 @@ func GetIfEntry(pIfRow *MibIfRow) (errcode error) { return } +func AddDllDirectory(path *uint16) (cookie uintptr, err error) { + r0, _, e1 := syscall.Syscall(procAddDllDirectory.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + cookie = uintptr(r0) + if cookie == 0 { + err = errnoErr(e1) + } + return +} + func AssignProcessToJobObject(job Handle, process Handle) (err error) { r1, _, e1 := syscall.Syscall(procAssignProcessToJobObject.Addr(), 2, uintptr(job), uintptr(process), 0) if r1 == 0 { @@ -2185,6 +2197,14 @@ func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, return } +func GetFileTime(handle Handle, ctime *Filetime, 
atime *Filetime, wtime *Filetime) (err error) { + r1, _, e1 := syscall.Syscall6(procGetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetFileType(filehandle Handle) (n uint32, err error) { r0, _, e1 := syscall.Syscall(procGetFileType.Addr(), 1, uintptr(filehandle), 0, 0) n = uint32(r0) @@ -2870,6 +2890,14 @@ func RemoveDirectory(path *uint16) (err error) { return } +func RemoveDllDirectory(cookie uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procRemoveDllDirectory.Addr(), 1, uintptr(cookie), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func ResetEvent(event Handle) (err error) { r1, _, e1 := syscall.Syscall(procResetEvent.Addr(), 1, uintptr(event), 0, 0) if r1 == 0 { diff --git a/vendor/golang.org/x/term/term_unix.go b/vendor/golang.org/x/term/term_unix.go index 62c2b3f41f..1ad0ddfe30 100644 --- a/vendor/golang.org/x/term/term_unix.go +++ b/vendor/golang.org/x/term/term_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package term diff --git a/vendor/golang.org/x/term/term_unix_bsd.go b/vendor/golang.org/x/term/term_unix_bsd.go index 853b3d6986..9dbf546298 100644 --- a/vendor/golang.org/x/term/term_unix_bsd.go +++ b/vendor/golang.org/x/term/term_unix_bsd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin || dragonfly || freebsd || netbsd || openbsd -// +build darwin dragonfly freebsd netbsd openbsd package term diff --git a/vendor/golang.org/x/term/term_unix_other.go b/vendor/golang.org/x/term/term_unix_other.go index 1e8955c934..1b36de799a 100644 --- a/vendor/golang.org/x/term/term_unix_other.go +++ b/vendor/golang.org/x/term/term_unix_other.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || linux || solaris || zos -// +build aix linux solaris zos package term diff --git a/vendor/golang.org/x/term/term_unsupported.go b/vendor/golang.org/x/term/term_unsupported.go index f1df850651..3c409e5885 100644 --- a/vendor/golang.org/x/term/term_unsupported.go +++ b/vendor/golang.org/x/term/term_unsupported.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !zos && !windows && !solaris && !plan9 -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!zos,!windows,!solaris,!plan9 package term diff --git a/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go b/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go index 8a7392c4a1..784bb88087 100644 --- a/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go +++ b/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build go1.10 -// +build go1.10 package bidirule diff --git a/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go b/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go index bb0a920018..8e1e943955 100644 --- a/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go +++ b/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build !go1.10 -// +build !go1.10 package bidirule diff --git a/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go index 42fa8d72ce..d2bd71181d 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.10 && !go1.13 -// +build go1.10,!go1.13 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go index 56a0e1ea21..f76bdca273 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.13 && !go1.14 -// +build go1.13,!go1.14 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go index baacf32b43..3aa2c3bdf8 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.14 && !go1.16 -// +build go1.14,!go1.16 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go index ffadb7bebd..a713757906 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.16 && !go1.21 -// +build go1.16,!go1.21 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go index 92cce5802c..f15746f7df 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.21 -// +build go1.21 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go index f517fdb202..c164d37917 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build !go1.10 -// +build !go1.10 package bidi diff --git a/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go index f5a0788277..1af161c756 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.10 && !go1.13 -// +build go1.10,!go1.13 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go index cb7239c437..eb73ecc373 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
//go:build go1.13 && !go1.14 -// +build go1.13,!go1.14 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go index 11b2733001..276cb8d8c0 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.14 && !go1.16 -// +build go1.14,!go1.16 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go index f65785e8ac..0cceffd731 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.16 && !go1.21 -// +build go1.16,!go1.21 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go index e1858b879d..b0819e42d0 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.21 -// +build go1.21 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go index 0175eae50a..bf65457d9b 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build !go1.10 -// +build !go1.10 package norm diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index f0e0cf3cb1..8f6c7f493f 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -52,6 +52,8 @@ func Every(interval time.Duration) Limit { // or its associated context.Context is canceled. // // The methods AllowN, ReserveN, and WaitN consume n tokens. +// +// Limiter is safe for simultaneous use by multiple goroutines. type Limiter struct { mu sync.Mutex limit Limit diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go index 721053c20a..0569f5dd43 100644 --- a/vendor/google.golang.org/appengine/internal/api.go +++ b/vendor/google.golang.org/appengine/internal/api.go @@ -2,12 +2,14 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
+//go:build !appengine // +build !appengine package internal import ( "bytes" + "context" "errors" "fmt" "io/ioutil" @@ -24,7 +26,6 @@ import ( "time" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" basepb "google.golang.org/appengine/internal/base" logpb "google.golang.org/appengine/internal/log" @@ -32,8 +33,7 @@ import ( ) const ( - apiPath = "/rpc_http" - defaultTicketSuffix = "/default.20150612t184001.0" + apiPath = "/rpc_http" ) var ( @@ -65,21 +65,22 @@ var ( IdleConnTimeout: 90 * time.Second, }, } - - defaultTicketOnce sync.Once - defaultTicket string - backgroundContextOnce sync.Once - backgroundContext netcontext.Context ) -func apiURL() *url.URL { +func apiURL(ctx context.Context) *url.URL { host, port := "appengine.googleapis.internal", "10001" if h := os.Getenv("API_HOST"); h != "" { host = h } + if hostOverride := ctx.Value(apiHostOverrideKey); hostOverride != nil { + host = hostOverride.(string) + } if p := os.Getenv("API_PORT"); p != "" { port = p } + if portOverride := ctx.Value(apiPortOverrideKey); portOverride != nil { + port = portOverride.(string) + } return &url.URL{ Scheme: "http", Host: host + ":" + port, @@ -87,82 +88,97 @@ func apiURL() *url.URL { } } -func handleHTTP(w http.ResponseWriter, r *http.Request) { - c := &context{ - req: r, - outHeader: w.Header(), - apiURL: apiURL(), - } - r = r.WithContext(withContext(r.Context(), c)) - c.req = r - - stopFlushing := make(chan int) +// Middleware wraps an http handler so that it can make GAE API calls +func Middleware(next http.Handler) http.Handler { + return handleHTTPMiddleware(executeRequestSafelyMiddleware(next)) +} - // Patch up RemoteAddr so it looks reasonable. - if addr := r.Header.Get(userIPHeader); addr != "" { - r.RemoteAddr = addr - } else if addr = r.Header.Get(remoteAddrHeader); addr != "" { - r.RemoteAddr = addr - } else { - // Should not normally reach here, but pick a sensible default anyway. - r.RemoteAddr = "127.0.0.1" - } - // The address in the headers will most likely be of these forms: - // 123.123.123.123 - // 2001:db8::1 - // net/http.Request.RemoteAddr is specified to be in "IP:port" form. - if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { - // Assume the remote address is only a host; add a default port. - r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80") - } +func handleHTTPMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + c := &aeContext{ + req: r, + outHeader: w.Header(), + } + r = r.WithContext(withContext(r.Context(), c)) + c.req = r + + stopFlushing := make(chan int) + + // Patch up RemoteAddr so it looks reasonable. + if addr := r.Header.Get(userIPHeader); addr != "" { + r.RemoteAddr = addr + } else if addr = r.Header.Get(remoteAddrHeader); addr != "" { + r.RemoteAddr = addr + } else { + // Should not normally reach here, but pick a sensible default anyway. + r.RemoteAddr = "127.0.0.1" + } + // The address in the headers will most likely be of these forms: + // 123.123.123.123 + // 2001:db8::1 + // net/http.Request.RemoteAddr is specified to be in "IP:port" form. + if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { + // Assume the remote address is only a host; add a default port. + r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80") + } - // Start goroutine responsible for flushing app logs. - // This is done after adding c to ctx.m (and stopped before removing it) - // because flushing logs requires making an API call. 
- go c.logFlusher(stopFlushing) + if logToLogservice() { + // Start goroutine responsible for flushing app logs. + // This is done after adding c to ctx.m (and stopped before removing it) + // because flushing logs requires making an API call. + go c.logFlusher(stopFlushing) + } - executeRequestSafely(c, r) - c.outHeader = nil // make sure header changes aren't respected any more + next.ServeHTTP(c, r) + c.outHeader = nil // make sure header changes aren't respected any more - stopFlushing <- 1 // any logging beyond this point will be dropped + flushed := make(chan struct{}) + if logToLogservice() { + stopFlushing <- 1 // any logging beyond this point will be dropped - // Flush any pending logs asynchronously. - c.pendingLogs.Lock() - flushes := c.pendingLogs.flushes - if len(c.pendingLogs.lines) > 0 { - flushes++ - } - c.pendingLogs.Unlock() - flushed := make(chan struct{}) - go func() { - defer close(flushed) - // Force a log flush, because with very short requests we - // may not ever flush logs. - c.flushLog(true) - }() - w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) + // Flush any pending logs asynchronously. + c.pendingLogs.Lock() + flushes := c.pendingLogs.flushes + if len(c.pendingLogs.lines) > 0 { + flushes++ + } + c.pendingLogs.Unlock() + go func() { + defer close(flushed) + // Force a log flush, because with very short requests we + // may not ever flush logs. + c.flushLog(true) + }() + w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) + } - // Avoid nil Write call if c.Write is never called. - if c.outCode != 0 { - w.WriteHeader(c.outCode) - } - if c.outBody != nil { - w.Write(c.outBody) - } - // Wait for the last flush to complete before returning, - // otherwise the security ticket will not be valid. - <-flushed + // Avoid nil Write call if c.Write is never called. + if c.outCode != 0 { + w.WriteHeader(c.outCode) + } + if c.outBody != nil { + w.Write(c.outBody) + } + if logToLogservice() { + // Wait for the last flush to complete before returning, + // otherwise the security ticket will not be valid. + <-flushed + } + }) } -func executeRequestSafely(c *context, r *http.Request) { - defer func() { - if x := recover(); x != nil { - logf(c, 4, "%s", renderPanic(x)) // 4 == critical - c.outCode = 500 - } - }() +func executeRequestSafelyMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer func() { + if x := recover(); x != nil { + c := w.(*aeContext) + logf(c, 4, "%s", renderPanic(x)) // 4 == critical + c.outCode = 500 + } + }() - http.DefaultServeMux.ServeHTTP(c, r) + next.ServeHTTP(w, r) + }) } func renderPanic(x interface{}) string { @@ -204,9 +220,9 @@ func renderPanic(x interface{}) string { return string(buf) } -// context represents the context of an in-flight HTTP request. +// aeContext represents the aeContext of an in-flight HTTP request. // It implements the appengine.Context and http.ResponseWriter interfaces. -type context struct { +type aeContext struct { req *http.Request outCode int @@ -218,8 +234,6 @@ type context struct { lines []*logpb.UserAppLogLine flushes int } - - apiURL *url.URL } var contextKey = "holds a *context" @@ -227,8 +241,8 @@ var contextKey = "holds a *context" // jointContext joins two contexts in a superficial way. // It takes values and timeouts from a base context, and only values from another context. 
type jointContext struct { - base netcontext.Context - valuesOnly netcontext.Context + base context.Context + valuesOnly context.Context } func (c jointContext) Deadline() (time.Time, bool) { @@ -252,94 +266,54 @@ func (c jointContext) Value(key interface{}) interface{} { // fromContext returns the App Engine context or nil if ctx is not // derived from an App Engine context. -func fromContext(ctx netcontext.Context) *context { - c, _ := ctx.Value(&contextKey).(*context) +func fromContext(ctx context.Context) *aeContext { + c, _ := ctx.Value(&contextKey).(*aeContext) return c } -func withContext(parent netcontext.Context, c *context) netcontext.Context { - ctx := netcontext.WithValue(parent, &contextKey, c) +func withContext(parent context.Context, c *aeContext) context.Context { + ctx := context.WithValue(parent, &contextKey, c) if ns := c.req.Header.Get(curNamespaceHeader); ns != "" { ctx = withNamespace(ctx, ns) } return ctx } -func toContext(c *context) netcontext.Context { - return withContext(netcontext.Background(), c) +func toContext(c *aeContext) context.Context { + return withContext(context.Background(), c) } -func IncomingHeaders(ctx netcontext.Context) http.Header { +func IncomingHeaders(ctx context.Context) http.Header { if c := fromContext(ctx); c != nil { return c.req.Header } return nil } -func ReqContext(req *http.Request) netcontext.Context { +func ReqContext(req *http.Request) context.Context { return req.Context() } -func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { +func WithContext(parent context.Context, req *http.Request) context.Context { return jointContext{ base: parent, valuesOnly: req.Context(), } } -// DefaultTicket returns a ticket used for background context or dev_appserver. -func DefaultTicket() string { - defaultTicketOnce.Do(func() { - if IsDevAppServer() { - defaultTicket = "testapp" + defaultTicketSuffix - return - } - appID := partitionlessAppID() - escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1) - majVersion := VersionID(nil) - if i := strings.Index(majVersion, "."); i > 0 { - majVersion = majVersion[:i] - } - defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID()) - }) - return defaultTicket -} - -func BackgroundContext() netcontext.Context { - backgroundContextOnce.Do(func() { - // Compute background security ticket. - ticket := DefaultTicket() - - c := &context{ - req: &http.Request{ - Header: http.Header{ - ticketHeader: []string{ticket}, - }, - }, - apiURL: apiURL(), - } - backgroundContext = toContext(c) - - // TODO(dsymonds): Wire up the shutdown handler to do a final flush. - go c.logFlusher(make(chan int)) - }) - - return backgroundContext -} - // RegisterTestRequest registers the HTTP request req for testing, such that -// any API calls are sent to the provided URL. It returns a closure to delete -// the registration. +// any API calls are sent to the provided URL. // It should only be used by aetest package. 
-func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) (*http.Request, func()) { - c := &context{ - req: req, - apiURL: apiURL, - } - ctx := withContext(decorate(req.Context()), c) - req = req.WithContext(ctx) - c.req = req - return req, func() {} +func RegisterTestRequest(req *http.Request, apiURL *url.URL, appID string) *http.Request { + ctx := req.Context() + ctx = withAPIHostOverride(ctx, apiURL.Hostname()) + ctx = withAPIPortOverride(ctx, apiURL.Port()) + ctx = WithAppIDOverride(ctx, appID) + + // use the unregistered request as a placeholder so that withContext can read the headers + c := &aeContext{req: req} + c.req = req.WithContext(withContext(ctx, c)) + return c.req } var errTimeout = &CallError{ @@ -348,7 +322,7 @@ var errTimeout = &CallError{ Timeout: true, } -func (c *context) Header() http.Header { return c.outHeader } +func (c *aeContext) Header() http.Header { return c.outHeader } // Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status // codes do not permit a response body (nor response entity headers such as @@ -365,7 +339,7 @@ func bodyAllowedForStatus(status int) bool { return true } -func (c *context) Write(b []byte) (int, error) { +func (c *aeContext) Write(b []byte) (int, error) { if c.outCode == 0 { c.WriteHeader(http.StatusOK) } @@ -376,7 +350,7 @@ func (c *context) Write(b []byte) (int, error) { return len(b), nil } -func (c *context) WriteHeader(code int) { +func (c *aeContext) WriteHeader(code int) { if c.outCode != 0 { logf(c, 3, "WriteHeader called multiple times on request.") // error level return @@ -384,10 +358,11 @@ func (c *context) WriteHeader(code int) { c.outCode = code } -func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) { +func post(ctx context.Context, body []byte, timeout time.Duration) (b []byte, err error) { + apiURL := apiURL(ctx) hreq := &http.Request{ Method: "POST", - URL: c.apiURL, + URL: apiURL, Header: http.Header{ apiEndpointHeader: apiEndpointHeaderValue, apiMethodHeader: apiMethodHeaderValue, @@ -396,13 +371,16 @@ func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) }, Body: ioutil.NopCloser(bytes.NewReader(body)), ContentLength: int64(len(body)), - Host: c.apiURL.Host, - } - if info := c.req.Header.Get(dapperHeader); info != "" { - hreq.Header.Set(dapperHeader, info) + Host: apiURL.Host, } - if info := c.req.Header.Get(traceHeader); info != "" { - hreq.Header.Set(traceHeader, info) + c := fromContext(ctx) + if c != nil { + if info := c.req.Header.Get(dapperHeader); info != "" { + hreq.Header.Set(dapperHeader, info) + } + if info := c.req.Header.Get(traceHeader); info != "" { + hreq.Header.Set(traceHeader, info) + } } tr := apiHTTPClient.Transport.(*http.Transport) @@ -444,7 +422,7 @@ func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) return hrespBody, nil } -func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { +func Call(ctx context.Context, service, method string, in, out proto.Message) error { if ns := NamespaceFromContext(ctx); ns != "" { if fn, ok := NamespaceMods[service]; ok { fn(in, ns) @@ -463,15 +441,11 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) } c := fromContext(ctx) - if c == nil { - // Give a good error message rather than a panic lower down. - return errNotAppEngineContext - } // Apply transaction modifications if we're in a transaction. 
if t := transactionFromContext(ctx); t != nil { if t.finished { - return errors.New("transaction context has expired") + return errors.New("transaction aeContext has expired") } applyTransaction(in, &t.transaction) } @@ -487,20 +461,13 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return err } - ticket := c.req.Header.Get(ticketHeader) - // Use a test ticket under test environment. - if ticket == "" { - if appid := ctx.Value(&appIDOverrideKey); appid != nil { - ticket = appid.(string) + defaultTicketSuffix + ticket := "" + if c != nil { + ticket = c.req.Header.Get(ticketHeader) + if dri := c.req.Header.Get(devRequestIdHeader); IsDevAppServer() && dri != "" { + ticket = dri } } - // Fall back to use background ticket when the request ticket is not available in Flex or dev_appserver. - if ticket == "" { - ticket = DefaultTicket() - } - if dri := c.req.Header.Get(devRequestIdHeader); IsDevAppServer() && dri != "" { - ticket = dri - } req := &remotepb.Request{ ServiceName: &service, Method: &method, @@ -512,7 +479,7 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return err } - hrespBody, err := c.post(hreqBody, timeout) + hrespBody, err := post(ctx, hreqBody, timeout) if err != nil { return err } @@ -549,11 +516,11 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return proto.Unmarshal(res.Response, out) } -func (c *context) Request() *http.Request { +func (c *aeContext) Request() *http.Request { return c.req } -func (c *context) addLogLine(ll *logpb.UserAppLogLine) { +func (c *aeContext) addLogLine(ll *logpb.UserAppLogLine) { // Truncate long log lines. // TODO(dsymonds): Check if this is still necessary. const lim = 8 << 10 @@ -575,18 +542,20 @@ var logLevelName = map[int64]string{ 4: "CRITICAL", } -func logf(c *context, level int64, format string, args ...interface{}) { +func logf(c *aeContext, level int64, format string, args ...interface{}) { if c == nil { - panic("not an App Engine context") + panic("not an App Engine aeContext") } s := fmt.Sprintf(format, args...) s = strings.TrimRight(s, "\n") // Remove any trailing newline characters. - c.addLogLine(&logpb.UserAppLogLine{ - TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), - Level: &level, - Message: &s, - }) - // Only duplicate log to stderr if not running on App Engine second generation + if logToLogservice() { + c.addLogLine(&logpb.UserAppLogLine{ + TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), + Level: &level, + Message: &s, + }) + } + // Log to stdout if not deployed if !IsSecondGen() { log.Print(logLevelName[level] + ": " + s) } @@ -594,7 +563,7 @@ func logf(c *context, level int64, format string, args ...interface{}) { // flushLog attempts to flush any pending logs to the appserver. // It should not be called concurrently. -func (c *context) flushLog(force bool) (flushed bool) { +func (c *aeContext) flushLog(force bool) (flushed bool) { c.pendingLogs.Lock() // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious. 
n, rem := 0, 30<<20 @@ -655,7 +624,7 @@ const ( forceFlushInterval = 60 * time.Second ) -func (c *context) logFlusher(stop <-chan int) { +func (c *aeContext) logFlusher(stop <-chan int) { lastFlush := time.Now() tick := time.NewTicker(flushInterval) for { @@ -673,6 +642,12 @@ func (c *context) logFlusher(stop <-chan int) { } } -func ContextForTesting(req *http.Request) netcontext.Context { - return toContext(&context{req: req}) +func ContextForTesting(req *http.Request) context.Context { + return toContext(&aeContext{req: req}) +} + +func logToLogservice() bool { + // TODO: replace logservice with json structured logs to $LOG_DIR/app.log.json + // where $LOG_DIR is /var/log in prod and some tmpdir in dev + return os.Getenv("LOG_TO_LOGSERVICE") != "0" } diff --git a/vendor/google.golang.org/appengine/internal/api_classic.go b/vendor/google.golang.org/appengine/internal/api_classic.go index f0f40b2e35..87c33c798e 100644 --- a/vendor/google.golang.org/appengine/internal/api_classic.go +++ b/vendor/google.golang.org/appengine/internal/api_classic.go @@ -2,11 +2,13 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build appengine // +build appengine package internal import ( + "context" "errors" "fmt" "net/http" @@ -17,20 +19,19 @@ import ( basepb "appengine_internal/base" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" ) var contextKey = "holds an appengine.Context" // fromContext returns the App Engine context or nil if ctx is not // derived from an App Engine context. -func fromContext(ctx netcontext.Context) appengine.Context { +func fromContext(ctx context.Context) appengine.Context { c, _ := ctx.Value(&contextKey).(appengine.Context) return c } // This is only for classic App Engine adapters. 
-func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error) { +func ClassicContextFromContext(ctx context.Context) (appengine.Context, error) { c := fromContext(ctx) if c == nil { return nil, errNotAppEngineContext @@ -38,8 +39,8 @@ func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error return c, nil } -func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context { - ctx := netcontext.WithValue(parent, &contextKey, c) +func withContext(parent context.Context, c appengine.Context) context.Context { + ctx := context.WithValue(parent, &contextKey, c) s := &basepb.StringProto{} c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil) @@ -50,7 +51,7 @@ func withContext(parent netcontext.Context, c appengine.Context) netcontext.Cont return ctx } -func IncomingHeaders(ctx netcontext.Context) http.Header { +func IncomingHeaders(ctx context.Context) http.Header { if c := fromContext(ctx); c != nil { if req, ok := c.Request().(*http.Request); ok { return req.Header @@ -59,11 +60,11 @@ func IncomingHeaders(ctx netcontext.Context) http.Header { return nil } -func ReqContext(req *http.Request) netcontext.Context { - return WithContext(netcontext.Background(), req) +func ReqContext(req *http.Request) context.Context { + return WithContext(context.Background(), req) } -func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { +func WithContext(parent context.Context, req *http.Request) context.Context { c := appengine.NewContext(req) return withContext(parent, c) } @@ -83,11 +84,11 @@ func (t *testingContext) Call(service, method string, _, _ appengine_internal.Pr } func (t *testingContext) Request() interface{} { return t.req } -func ContextForTesting(req *http.Request) netcontext.Context { - return withContext(netcontext.Background(), &testingContext{req: req}) +func ContextForTesting(req *http.Request) context.Context { + return withContext(context.Background(), &testingContext{req: req}) } -func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { +func Call(ctx context.Context, service, method string, in, out proto.Message) error { if ns := NamespaceFromContext(ctx); ns != "" { if fn, ok := NamespaceMods[service]; ok { fn(in, ns) @@ -144,8 +145,8 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return err } -func handleHTTP(w http.ResponseWriter, r *http.Request) { - panic("handleHTTP called; this should be impossible") +func Middleware(next http.Handler) http.Handler { + panic("Middleware called; this should be impossible") } func logf(c appengine.Context, level int64, format string, args ...interface{}) { diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go index e0c0b214b7..5b95c13d92 100644 --- a/vendor/google.golang.org/appengine/internal/api_common.go +++ b/vendor/google.golang.org/appengine/internal/api_common.go @@ -5,20 +5,26 @@ package internal import ( + "context" "errors" "os" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" ) +type ctxKey string + +func (c ctxKey) String() string { + return "appengine context key: " + string(c) +} + var errNotAppEngineContext = errors.New("not an App Engine context") -type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error +type CallOverrideFunc func(ctx context.Context, service, method string, in, out proto.Message) error var 
callOverrideKey = "holds []CallOverrideFunc" -func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context { +func WithCallOverride(ctx context.Context, f CallOverrideFunc) context.Context { // We avoid appending to any existing call override // so we don't risk overwriting a popped stack below. var cofs []CallOverrideFunc @@ -26,10 +32,10 @@ func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Con cofs = append(cofs, uf...) } cofs = append(cofs, f) - return netcontext.WithValue(ctx, &callOverrideKey, cofs) + return context.WithValue(ctx, &callOverrideKey, cofs) } -func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) { +func callOverrideFromContext(ctx context.Context) (CallOverrideFunc, context.Context, bool) { cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc) if len(cofs) == 0 { return nil, nil, false @@ -37,7 +43,7 @@ func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netconte // We found a list of overrides; grab the last, and reconstitute a // context that will hide it. f := cofs[len(cofs)-1] - ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1]) + ctx = context.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1]) return f, ctx, true } @@ -45,23 +51,35 @@ type logOverrideFunc func(level int64, format string, args ...interface{}) var logOverrideKey = "holds a logOverrideFunc" -func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context { - return netcontext.WithValue(ctx, &logOverrideKey, f) +func WithLogOverride(ctx context.Context, f logOverrideFunc) context.Context { + return context.WithValue(ctx, &logOverrideKey, f) } var appIDOverrideKey = "holds a string, being the full app ID" -func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context { - return netcontext.WithValue(ctx, &appIDOverrideKey, appID) +func WithAppIDOverride(ctx context.Context, appID string) context.Context { + return context.WithValue(ctx, &appIDOverrideKey, appID) +} + +var apiHostOverrideKey = ctxKey("holds a string, being the alternate API_HOST") + +func withAPIHostOverride(ctx context.Context, apiHost string) context.Context { + return context.WithValue(ctx, apiHostOverrideKey, apiHost) +} + +var apiPortOverrideKey = ctxKey("holds a string, being the alternate API_PORT") + +func withAPIPortOverride(ctx context.Context, apiPort string) context.Context { + return context.WithValue(ctx, apiPortOverrideKey, apiPort) } var namespaceKey = "holds the namespace string" -func withNamespace(ctx netcontext.Context, ns string) netcontext.Context { - return netcontext.WithValue(ctx, &namespaceKey, ns) +func withNamespace(ctx context.Context, ns string) context.Context { + return context.WithValue(ctx, &namespaceKey, ns) } -func NamespaceFromContext(ctx netcontext.Context) string { +func NamespaceFromContext(ctx context.Context) string { // If there's no namespace, return the empty string. ns, _ := ctx.Value(&namespaceKey).(string) return ns @@ -70,14 +88,14 @@ func NamespaceFromContext(ctx netcontext.Context) string { // FullyQualifiedAppID returns the fully-qualified application ID. // This may contain a partition prefix (e.g. "s~" for High Replication apps), // or a domain prefix (e.g. "example.com:"). 
-func FullyQualifiedAppID(ctx netcontext.Context) string { +func FullyQualifiedAppID(ctx context.Context) string { if id, ok := ctx.Value(&appIDOverrideKey).(string); ok { return id } return fullyQualifiedAppID(ctx) } -func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) { +func Logf(ctx context.Context, level int64, format string, args ...interface{}) { if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok { f(level, format, args...) return @@ -90,7 +108,7 @@ func Logf(ctx netcontext.Context, level int64, format string, args ...interface{ } // NamespacedContext wraps a Context to support namespaces. -func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context { +func NamespacedContext(ctx context.Context, namespace string) context.Context { return withNamespace(ctx, namespace) } diff --git a/vendor/google.golang.org/appengine/internal/identity.go b/vendor/google.golang.org/appengine/internal/identity.go index 9b4134e425..0f95aa91d5 100644 --- a/vendor/google.golang.org/appengine/internal/identity.go +++ b/vendor/google.golang.org/appengine/internal/identity.go @@ -5,9 +5,8 @@ package internal import ( + "context" "os" - - netcontext "golang.org/x/net/context" ) var ( @@ -23,7 +22,7 @@ var ( // AppID is the implementation of the wrapper function of the same name in // ../identity.go. See that file for commentary. -func AppID(c netcontext.Context) string { +func AppID(c context.Context) string { return appID(FullyQualifiedAppID(c)) } @@ -35,7 +34,7 @@ func IsStandard() bool { return appengineStandard || IsSecondGen() } -// IsStandard is the implementation of the wrapper function of the same name in +// IsSecondGen is the implementation of the wrapper function of the same name in // ../appengine.go. See that file for commentary. func IsSecondGen() bool { // Second-gen runtimes set $GAE_ENV so we use that to check if we're on a second-gen runtime. diff --git a/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/google.golang.org/appengine/internal/identity_classic.go index 4e979f45e3..5ad3548bf7 100644 --- a/vendor/google.golang.org/appengine/internal/identity_classic.go +++ b/vendor/google.golang.org/appengine/internal/identity_classic.go @@ -2,21 +2,22 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
+//go:build appengine // +build appengine package internal import ( - "appengine" + "context" - netcontext "golang.org/x/net/context" + "appengine" ) func init() { appengineStandard = true } -func DefaultVersionHostname(ctx netcontext.Context) string { +func DefaultVersionHostname(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) @@ -24,12 +25,12 @@ func DefaultVersionHostname(ctx netcontext.Context) string { return appengine.DefaultVersionHostname(c) } -func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() } -func ServerSoftware() string { return appengine.ServerSoftware() } -func InstanceID() string { return appengine.InstanceID() } -func IsDevAppServer() bool { return appengine.IsDevAppServer() } +func Datacenter(_ context.Context) string { return appengine.Datacenter() } +func ServerSoftware() string { return appengine.ServerSoftware() } +func InstanceID() string { return appengine.InstanceID() } +func IsDevAppServer() bool { return appengine.IsDevAppServer() } -func RequestID(ctx netcontext.Context) string { +func RequestID(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) @@ -37,14 +38,14 @@ func RequestID(ctx netcontext.Context) string { return appengine.RequestID(c) } -func ModuleName(ctx netcontext.Context) string { +func ModuleName(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) } return appengine.ModuleName(c) } -func VersionID(ctx netcontext.Context) string { +func VersionID(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) @@ -52,7 +53,7 @@ func VersionID(ctx netcontext.Context) string { return appengine.VersionID(c) } -func fullyQualifiedAppID(ctx netcontext.Context) string { +func fullyQualifiedAppID(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) diff --git a/vendor/google.golang.org/appengine/internal/identity_flex.go b/vendor/google.golang.org/appengine/internal/identity_flex.go index d5e2e7b5e3..4201b6b585 100644 --- a/vendor/google.golang.org/appengine/internal/identity_flex.go +++ b/vendor/google.golang.org/appengine/internal/identity_flex.go @@ -2,6 +2,7 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build appenginevm // +build appenginevm package internal diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go index 5d80672635..18ddda3a42 100644 --- a/vendor/google.golang.org/appengine/internal/identity_vm.go +++ b/vendor/google.golang.org/appengine/internal/identity_vm.go @@ -2,17 +2,17 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
+//go:build !appengine // +build !appengine package internal import ( + "context" "log" "net/http" "os" "strings" - - netcontext "golang.org/x/net/context" ) // These functions are implementations of the wrapper functions @@ -24,7 +24,7 @@ const ( hDatacenter = "X-AppEngine-Datacenter" ) -func ctxHeaders(ctx netcontext.Context) http.Header { +func ctxHeaders(ctx context.Context) http.Header { c := fromContext(ctx) if c == nil { return nil @@ -32,15 +32,15 @@ func ctxHeaders(ctx netcontext.Context) http.Header { return c.Request().Header } -func DefaultVersionHostname(ctx netcontext.Context) string { +func DefaultVersionHostname(ctx context.Context) string { return ctxHeaders(ctx).Get(hDefaultVersionHostname) } -func RequestID(ctx netcontext.Context) string { +func RequestID(ctx context.Context) string { return ctxHeaders(ctx).Get(hRequestLogId) } -func Datacenter(ctx netcontext.Context) string { +func Datacenter(ctx context.Context) string { if dc := ctxHeaders(ctx).Get(hDatacenter); dc != "" { return dc } @@ -71,7 +71,7 @@ func ServerSoftware() string { // TODO(dsymonds): Remove the metadata fetches. -func ModuleName(_ netcontext.Context) string { +func ModuleName(_ context.Context) string { if s := os.Getenv("GAE_MODULE_NAME"); s != "" { return s } @@ -81,7 +81,7 @@ func ModuleName(_ netcontext.Context) string { return string(mustGetMetadata("instance/attributes/gae_backend_name")) } -func VersionID(_ netcontext.Context) string { +func VersionID(_ context.Context) string { if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" { return s1 + "." + s2 } @@ -112,7 +112,7 @@ func partitionlessAppID() string { return string(mustGetMetadata("instance/attributes/gae_project")) } -func fullyQualifiedAppID(_ netcontext.Context) string { +func fullyQualifiedAppID(_ context.Context) string { if s := os.Getenv("GAE_APPLICATION"); s != "" { return s } @@ -130,5 +130,5 @@ func fullyQualifiedAppID(_ netcontext.Context) string { } func IsDevAppServer() bool { - return os.Getenv("RUN_WITH_DEVAPPSERVER") != "" + return os.Getenv("RUN_WITH_DEVAPPSERVER") != "" || os.Getenv("GAE_ENV") == "localdev" } diff --git a/vendor/google.golang.org/appengine/internal/main.go b/vendor/google.golang.org/appengine/internal/main.go index 1e765312fd..afd0ae84fd 100644 --- a/vendor/google.golang.org/appengine/internal/main.go +++ b/vendor/google.golang.org/appengine/internal/main.go @@ -2,6 +2,7 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build appengine // +build appengine package internal diff --git a/vendor/google.golang.org/appengine/internal/main_vm.go b/vendor/google.golang.org/appengine/internal/main_vm.go index ddb79a3338..86a8caf06f 100644 --- a/vendor/google.golang.org/appengine/internal/main_vm.go +++ b/vendor/google.golang.org/appengine/internal/main_vm.go @@ -2,6 +2,7 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
+//go:build !appengine // +build !appengine package internal @@ -29,7 +30,7 @@ func Main() { if IsDevAppServer() { host = "127.0.0.1" } - if err := http.ListenAndServe(host+":"+port, http.HandlerFunc(handleHTTP)); err != nil { + if err := http.ListenAndServe(host+":"+port, Middleware(http.DefaultServeMux)); err != nil { log.Fatalf("http.ListenAndServe: %v", err) } } diff --git a/vendor/google.golang.org/appengine/internal/transaction.go b/vendor/google.golang.org/appengine/internal/transaction.go index 9006ae6538..2ae8ab9fa4 100644 --- a/vendor/google.golang.org/appengine/internal/transaction.go +++ b/vendor/google.golang.org/appengine/internal/transaction.go @@ -7,11 +7,11 @@ package internal // This file implements hooks for applying datastore transactions. import ( + "context" "errors" "reflect" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" basepb "google.golang.org/appengine/internal/base" pb "google.golang.org/appengine/internal/datastore" @@ -38,13 +38,13 @@ func applyTransaction(pb proto.Message, t *pb.Transaction) { var transactionKey = "used for *Transaction" -func transactionFromContext(ctx netcontext.Context) *transaction { +func transactionFromContext(ctx context.Context) *transaction { t, _ := ctx.Value(&transactionKey).(*transaction) return t } -func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context { - return netcontext.WithValue(ctx, &transactionKey, t) +func withTransaction(ctx context.Context, t *transaction) context.Context { + return context.WithValue(ctx, &transactionKey, t) } type transaction struct { @@ -54,7 +54,7 @@ type transaction struct { var ErrConcurrentTransaction = errors.New("internal: concurrent transaction") -func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) { +func RunTransactionOnce(c context.Context, f func(context.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) { if transactionFromContext(c) != nil { return nil, errors.New("nested transactions are not supported") } diff --git a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go index 6ffe1e6d90..6c0d72418d 100644 --- a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go +++ b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go @@ -7,6 +7,7 @@ package urlfetch // import "google.golang.org/appengine/urlfetch" import ( + "context" "errors" "fmt" "io" @@ -18,7 +19,6 @@ import ( "time" "github.com/golang/protobuf/proto" - "golang.org/x/net/context" "google.golang.org/appengine/internal" pb "google.golang.org/appengine/internal/urlfetch" @@ -44,11 +44,10 @@ type Transport struct { var _ http.RoundTripper = (*Transport)(nil) // Client returns an *http.Client using a default urlfetch Transport. This -// client will have the default deadline of 5 seconds, and will check the -// validity of SSL certificates. +// client will check the validity of SSL certificates. // -// Any deadline of the provided context will be used for requests through this client; -// if the client does not have a deadline then a 5 second default is used. +// Any deadline of the provided context will be used for requests through this client. +// If the client does not have a deadline, then an App Engine default of 60 second is used. 
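The urlfetch.Client documentation change above drops the old implicit 5-second default: the request deadline now comes from the caller's context, with the App Engine default applying only when none is set. A minimal sketch of assumed caller code (the handler name and target URL are illustrative) that sets an explicit per-request deadline:

package main

import (
	"context"
	"io"
	"log"
	"net/http"
	"time"

	"google.golang.org/appengine"
	"google.golang.org/appengine/urlfetch"
)

func fetch(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)

	// The deadline carried by ctx is what bounds the outgoing request now.
	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()

	client := urlfetch.Client(ctx)
	resp, err := client.Get("https://example.com/")
	if err != nil {
		log.Printf("fetch failed: %v", err)
		http.Error(w, "upstream fetch failed", http.StatusBadGateway)
		return
	}
	defer resp.Body.Close()
	io.Copy(w, resp.Body)
}

func main() {
	http.HandleFunc("/fetch", fetch)
	appengine.Main()
}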
func Client(ctx context.Context) *http.Client { return &http.Client{ Transport: &Transport{ diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go index 575456c838..bd6b17e158 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go @@ -226,7 +226,7 @@ type ValidatingAdmissionPolicySpec struct { // +listType=map // +listMapKey=name // +optional - Variables []Variable `json:"variables" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=variables"` + Variables []Variable `json:"variables,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=variables"` } type MatchCondition v1.MatchCondition diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go index c199702fbd..12c680dc97 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go @@ -242,7 +242,7 @@ type ValidatingAdmissionPolicySpec struct { // +listType=map // +listMapKey=name // +optional - Variables []Variable `json:"variables" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=variables"` + Variables []Variable `json:"variables,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=variables"` } // ParamKind is a tuple of Group Kind and Version. diff --git a/vendor/k8s.io/api/batch/v1/generated.proto b/vendor/k8s.io/api/batch/v1/generated.proto index 4f0822440f..03547dacb4 100644 --- a/vendor/k8s.io/api/batch/v1/generated.proto +++ b/vendor/k8s.io/api/batch/v1/generated.proto @@ -229,8 +229,8 @@ message JobSpec { // batch.kubernetes.io/job-index-failure-count annotation. It can only // be set when Job's completionMode=Indexed, and the Pod's restart // policy is Never. The field is immutable. - // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` - // feature gate is enabled (disabled by default). + // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (enabled by default). // +optional optional int32 backoffLimitPerIndex = 12; @@ -242,8 +242,8 @@ message JobSpec { // It can only be specified when backoffLimitPerIndex is set. // It can be null or up to completions. It is required and must be // less than or equal to 10^4 when is completions greater than 10^5. - // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` - // feature gate is enabled (disabled by default). + // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (enabled by default). // +optional optional int32 maxFailedIndexes = 13; @@ -398,8 +398,8 @@ message JobStatus { // last element of the series, separated by a hyphen. // For example, if the failed indexes are 1, 3, 4, 5 and 7, they are // represented as "1,3-5,7". - // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` - // feature gate is enabled (disabled by default). + // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (enabled by default). // +optional optional string failedIndexes = 10; @@ -421,9 +421,6 @@ message JobStatus { optional UncountedTerminatedPods uncountedTerminatedPods = 8; // The number of pods which have a Ready condition. - // - // This field is beta-level. 
The job controller populates the field when - // the feature gate JobReadyPods is enabled (enabled by default). // +optional optional int32 ready = 9; } @@ -512,8 +509,8 @@ message PodFailurePolicyRule { // running pods are terminated. // - FailIndex: indicates that the pod's index is marked as Failed and will // not be restarted. - // This value is alpha-level. It can be used when the - // `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default). + // This value is beta-level. It can be used when the + // `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default). // - Ignore: indicates that the counter towards the .backoffLimit is not // incremented and a replacement pod is created. // - Count: indicates that the pod is handled in the default way - the diff --git a/vendor/k8s.io/api/batch/v1/types.go b/vendor/k8s.io/api/batch/v1/types.go index 8a28614c0b..2a77b7b08f 100644 --- a/vendor/k8s.io/api/batch/v1/types.go +++ b/vendor/k8s.io/api/batch/v1/types.go @@ -124,6 +124,7 @@ const ( // This is an action which might be taken on a pod failure - mark the // Job's index as failed to avoid restarts within this index. This action // can only be used when backoffLimitPerIndex is set. + // This value is beta-level. PodFailurePolicyActionFailIndex PodFailurePolicyAction = "FailIndex" // This is an action which might be taken on a pod failure - the counter towards @@ -218,8 +219,8 @@ type PodFailurePolicyRule struct { // running pods are terminated. // - FailIndex: indicates that the pod's index is marked as Failed and will // not be restarted. - // This value is alpha-level. It can be used when the - // `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default). + // This value is beta-level. It can be used when the + // `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default). // - Ignore: indicates that the counter towards the .backoffLimit is not // incremented and a replacement pod is created. // - Count: indicates that the pod is handled in the default way - the @@ -303,8 +304,8 @@ type JobSpec struct { // batch.kubernetes.io/job-index-failure-count annotation. It can only // be set when Job's completionMode=Indexed, and the Pod's restart // policy is Never. The field is immutable. - // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` - // feature gate is enabled (disabled by default). + // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (enabled by default). // +optional BackoffLimitPerIndex *int32 `json:"backoffLimitPerIndex,omitempty" protobuf:"varint,12,opt,name=backoffLimitPerIndex"` @@ -316,8 +317,8 @@ type JobSpec struct { // It can only be specified when backoffLimitPerIndex is set. // It can be null or up to completions. It is required and must be // less than or equal to 10^4 when is completions greater than 10^5. - // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` - // feature gate is enabled (disabled by default). + // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (enabled by default). // +optional MaxFailedIndexes *int32 `json:"maxFailedIndexes,omitempty" protobuf:"varint,13,opt,name=maxFailedIndexes"` @@ -477,8 +478,8 @@ type JobStatus struct { // last element of the series, separated by a hyphen. // For example, if the failed indexes are 1, 3, 4, 5 and 7, they are // represented as "1,3-5,7". - // This field is alpha-level. 
It can be used when the `JobBackoffLimitPerIndex` - // feature gate is enabled (disabled by default). + // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (enabled by default). // +optional FailedIndexes *string `json:"failedIndexes,omitempty" protobuf:"bytes,10,opt,name=failedIndexes"` @@ -500,9 +501,6 @@ type JobStatus struct { UncountedTerminatedPods *UncountedTerminatedPods `json:"uncountedTerminatedPods,omitempty" protobuf:"bytes,8,opt,name=uncountedTerminatedPods"` // The number of pods which have a Ready condition. - // - // This field is beta-level. The job controller populates the field when - // the feature gate JobReadyPods is enabled (enabled by default). // +optional Ready *int32 `json:"ready,omitempty" protobuf:"varint,9,opt,name=ready"` } @@ -535,6 +533,25 @@ const ( JobFailureTarget JobConditionType = "FailureTarget" ) +const ( + // JobReasonPodFailurePolicy reason indicates a job failure condition is added due to + // a failed pod matching a pod failure policy rule + // https://kep.k8s.io/3329 + // This is currently a beta field. + JobReasonPodFailurePolicy string = "PodFailurePolicy" + // JobReasonBackOffLimitExceeded reason indicates that pods within a job have failed a number of + // times higher than backOffLimit times. + JobReasonBackoffLimitExceeded string = "BackoffLimitExceeded" + // JobReasponDeadlineExceeded means job duration is past ActiveDeadline + JobReasonDeadlineExceeded string = "DeadlineExceeded" + // JobReasonMaxFailedIndexesExceeded indicates that an indexed of a job failed + // This const is used in beta-level feature: https://kep.k8s.io/3850. + JobReasonMaxFailedIndexesExceeded string = "MaxFailedIndexesExceeded" + // JobReasonFailedIndexes means Job has failed indexes. + // This const is used in beta-level feature: https://kep.k8s.io/3850. + JobReasonFailedIndexes string = "FailedIndexes" +) + // JobCondition describes current state of a job. type JobCondition struct { // Type of job condition, Complete or Failed. diff --git a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go index 43b4e1e7d9..51a155203b 100644 --- a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go @@ -117,8 +117,8 @@ var map_JobSpec = map[string]string{ "activeDeadlineSeconds": "Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again.", "podFailurePolicy": "Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.\n\nThis field is beta-level. It can be used when the `JobPodFailurePolicy` feature gate is enabled (enabled by default).", "backoffLimit": "Specifies the number of retries before marking this job failed. Defaults to 6", - "backoffLimitPerIndex": "Specifies the limit for the number of retries within an index before marking this index as failed. 
When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).", - "maxFailedIndexes": "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5. This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).", + "backoffLimitPerIndex": "Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", + "maxFailedIndexes": "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", "selector": "A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", "manualSelector": "manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector", "template": "Describes the pod that will be created when executing a job. The only allowed template.spec.restartPolicy values are \"Never\" or \"OnFailure\". 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", @@ -142,9 +142,9 @@ var map_JobStatus = map[string]string{ "failed": "The number of pods which reached phase Failed.", "terminating": "The number of pods which are terminating (in phase Pending or Running and have a deletionTimestamp).\n\nThis field is alpha-level. The job controller populates the field when the feature gate JobPodReplacementPolicy is enabled (disabled by default).", "completedIndexes": "completedIndexes holds the completed indexes when .spec.completionMode = \"Indexed\" in a text format. The indexes are represented as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\".", - "failedIndexes": "FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).", + "failedIndexes": "FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", "uncountedTerminatedPods": "uncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status:\n\n1. Add the pod UID to the arrays in this field. 2. Remove the pod finalizer. 3. Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nOld jobs might not be tracked using this field, in which case the field remains null.", - "ready": "The number of pods which have a Ready condition.\n\nThis field is beta-level. The job controller populates the field when the feature gate JobReadyPods is enabled (enabled by default).", + "ready": "The number of pods which have a Ready condition.", } func (JobStatus) SwaggerDoc() map[string]string { @@ -193,7 +193,7 @@ func (PodFailurePolicyOnPodConditionsPattern) SwaggerDoc() map[string]string { var map_PodFailurePolicyRule = map[string]string{ "": "PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. 
One of onExitCodes and onPodConditions, but not both, can be used in each rule.", - "action": "Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are:\n\n- FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- FailIndex: indicates that the pod's index is marked as Failed and will\n not be restarted.\n This value is alpha-level. It can be used when the\n `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule.", + "action": "Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are:\n\n- FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- FailIndex: indicates that the pod's index is marked as Failed and will\n not be restarted.\n This value is beta-level. It can be used when the\n `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule.", "onExitCodes": "Represents the requirement on the container exit codes.", "onPodConditions": "Represents the requirement on the pod conditions. The requirement is represented as a list of pod condition patterns. The requirement is satisfied if at least one pattern matches an actual pod condition. 
At most 20 elements are allowed.", } diff --git a/vendor/k8s.io/api/core/v1/generated.pb.go b/vendor/k8s.io/api/core/v1/generated.pb.go index c267a5febd..04c7939e0d 100644 --- a/vendor/k8s.io/api/core/v1/generated.pb.go +++ b/vendor/k8s.io/api/core/v1/generated.pb.go @@ -525,10 +525,38 @@ func (m *ClientIPConfig) XXX_DiscardUnknown() { var xxx_messageInfo_ClientIPConfig proto.InternalMessageInfo +func (m *ClusterTrustBundleProjection) Reset() { *m = ClusterTrustBundleProjection{} } +func (*ClusterTrustBundleProjection) ProtoMessage() {} +func (*ClusterTrustBundleProjection) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{17} +} +func (m *ClusterTrustBundleProjection) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterTrustBundleProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterTrustBundleProjection) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterTrustBundleProjection.Merge(m, src) +} +func (m *ClusterTrustBundleProjection) XXX_Size() int { + return m.Size() +} +func (m *ClusterTrustBundleProjection) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterTrustBundleProjection.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterTrustBundleProjection proto.InternalMessageInfo + func (m *ComponentCondition) Reset() { *m = ComponentCondition{} } func (*ComponentCondition) ProtoMessage() {} func (*ComponentCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{17} + return fileDescriptor_83c10c24ec417dc9, []int{18} } func (m *ComponentCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -556,7 +584,7 @@ var xxx_messageInfo_ComponentCondition proto.InternalMessageInfo func (m *ComponentStatus) Reset() { *m = ComponentStatus{} } func (*ComponentStatus) ProtoMessage() {} func (*ComponentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{18} + return fileDescriptor_83c10c24ec417dc9, []int{19} } func (m *ComponentStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -584,7 +612,7 @@ var xxx_messageInfo_ComponentStatus proto.InternalMessageInfo func (m *ComponentStatusList) Reset() { *m = ComponentStatusList{} } func (*ComponentStatusList) ProtoMessage() {} func (*ComponentStatusList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{19} + return fileDescriptor_83c10c24ec417dc9, []int{20} } func (m *ComponentStatusList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -612,7 +640,7 @@ var xxx_messageInfo_ComponentStatusList proto.InternalMessageInfo func (m *ConfigMap) Reset() { *m = ConfigMap{} } func (*ConfigMap) ProtoMessage() {} func (*ConfigMap) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{20} + return fileDescriptor_83c10c24ec417dc9, []int{21} } func (m *ConfigMap) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -640,7 +668,7 @@ var xxx_messageInfo_ConfigMap proto.InternalMessageInfo func (m *ConfigMapEnvSource) Reset() { *m = ConfigMapEnvSource{} } func (*ConfigMapEnvSource) ProtoMessage() {} func (*ConfigMapEnvSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{21} + return fileDescriptor_83c10c24ec417dc9, []int{22} } func (m *ConfigMapEnvSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -668,7 +696,7 @@ var xxx_messageInfo_ConfigMapEnvSource 
proto.InternalMessageInfo func (m *ConfigMapKeySelector) Reset() { *m = ConfigMapKeySelector{} } func (*ConfigMapKeySelector) ProtoMessage() {} func (*ConfigMapKeySelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{22} + return fileDescriptor_83c10c24ec417dc9, []int{23} } func (m *ConfigMapKeySelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -696,7 +724,7 @@ var xxx_messageInfo_ConfigMapKeySelector proto.InternalMessageInfo func (m *ConfigMapList) Reset() { *m = ConfigMapList{} } func (*ConfigMapList) ProtoMessage() {} func (*ConfigMapList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{23} + return fileDescriptor_83c10c24ec417dc9, []int{24} } func (m *ConfigMapList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -724,7 +752,7 @@ var xxx_messageInfo_ConfigMapList proto.InternalMessageInfo func (m *ConfigMapNodeConfigSource) Reset() { *m = ConfigMapNodeConfigSource{} } func (*ConfigMapNodeConfigSource) ProtoMessage() {} func (*ConfigMapNodeConfigSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{24} + return fileDescriptor_83c10c24ec417dc9, []int{25} } func (m *ConfigMapNodeConfigSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -752,7 +780,7 @@ var xxx_messageInfo_ConfigMapNodeConfigSource proto.InternalMessageInfo func (m *ConfigMapProjection) Reset() { *m = ConfigMapProjection{} } func (*ConfigMapProjection) ProtoMessage() {} func (*ConfigMapProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{25} + return fileDescriptor_83c10c24ec417dc9, []int{26} } func (m *ConfigMapProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -780,7 +808,7 @@ var xxx_messageInfo_ConfigMapProjection proto.InternalMessageInfo func (m *ConfigMapVolumeSource) Reset() { *m = ConfigMapVolumeSource{} } func (*ConfigMapVolumeSource) ProtoMessage() {} func (*ConfigMapVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{26} + return fileDescriptor_83c10c24ec417dc9, []int{27} } func (m *ConfigMapVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -808,7 +836,7 @@ var xxx_messageInfo_ConfigMapVolumeSource proto.InternalMessageInfo func (m *Container) Reset() { *m = Container{} } func (*Container) ProtoMessage() {} func (*Container) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{27} + return fileDescriptor_83c10c24ec417dc9, []int{28} } func (m *Container) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -836,7 +864,7 @@ var xxx_messageInfo_Container proto.InternalMessageInfo func (m *ContainerImage) Reset() { *m = ContainerImage{} } func (*ContainerImage) ProtoMessage() {} func (*ContainerImage) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{28} + return fileDescriptor_83c10c24ec417dc9, []int{29} } func (m *ContainerImage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -864,7 +892,7 @@ var xxx_messageInfo_ContainerImage proto.InternalMessageInfo func (m *ContainerPort) Reset() { *m = ContainerPort{} } func (*ContainerPort) ProtoMessage() {} func (*ContainerPort) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{29} + return fileDescriptor_83c10c24ec417dc9, []int{30} } func (m *ContainerPort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -892,7 +920,7 @@ var xxx_messageInfo_ContainerPort proto.InternalMessageInfo func (m *ContainerResizePolicy) 
Reset() { *m = ContainerResizePolicy{} } func (*ContainerResizePolicy) ProtoMessage() {} func (*ContainerResizePolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{30} + return fileDescriptor_83c10c24ec417dc9, []int{31} } func (m *ContainerResizePolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -920,7 +948,7 @@ var xxx_messageInfo_ContainerResizePolicy proto.InternalMessageInfo func (m *ContainerState) Reset() { *m = ContainerState{} } func (*ContainerState) ProtoMessage() {} func (*ContainerState) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{31} + return fileDescriptor_83c10c24ec417dc9, []int{32} } func (m *ContainerState) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -948,7 +976,7 @@ var xxx_messageInfo_ContainerState proto.InternalMessageInfo func (m *ContainerStateRunning) Reset() { *m = ContainerStateRunning{} } func (*ContainerStateRunning) ProtoMessage() {} func (*ContainerStateRunning) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{32} + return fileDescriptor_83c10c24ec417dc9, []int{33} } func (m *ContainerStateRunning) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -976,7 +1004,7 @@ var xxx_messageInfo_ContainerStateRunning proto.InternalMessageInfo func (m *ContainerStateTerminated) Reset() { *m = ContainerStateTerminated{} } func (*ContainerStateTerminated) ProtoMessage() {} func (*ContainerStateTerminated) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{33} + return fileDescriptor_83c10c24ec417dc9, []int{34} } func (m *ContainerStateTerminated) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1004,7 +1032,7 @@ var xxx_messageInfo_ContainerStateTerminated proto.InternalMessageInfo func (m *ContainerStateWaiting) Reset() { *m = ContainerStateWaiting{} } func (*ContainerStateWaiting) ProtoMessage() {} func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{34} + return fileDescriptor_83c10c24ec417dc9, []int{35} } func (m *ContainerStateWaiting) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1032,7 +1060,7 @@ var xxx_messageInfo_ContainerStateWaiting proto.InternalMessageInfo func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } func (*ContainerStatus) ProtoMessage() {} func (*ContainerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{35} + return fileDescriptor_83c10c24ec417dc9, []int{36} } func (m *ContainerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1060,7 +1088,7 @@ var xxx_messageInfo_ContainerStatus proto.InternalMessageInfo func (m *DaemonEndpoint) Reset() { *m = DaemonEndpoint{} } func (*DaemonEndpoint) ProtoMessage() {} func (*DaemonEndpoint) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{36} + return fileDescriptor_83c10c24ec417dc9, []int{37} } func (m *DaemonEndpoint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1088,7 +1116,7 @@ var xxx_messageInfo_DaemonEndpoint proto.InternalMessageInfo func (m *DownwardAPIProjection) Reset() { *m = DownwardAPIProjection{} } func (*DownwardAPIProjection) ProtoMessage() {} func (*DownwardAPIProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{37} + return fileDescriptor_83c10c24ec417dc9, []int{38} } func (m *DownwardAPIProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1116,7 +1144,7 @@ var xxx_messageInfo_DownwardAPIProjection 
proto.InternalMessageInfo func (m *DownwardAPIVolumeFile) Reset() { *m = DownwardAPIVolumeFile{} } func (*DownwardAPIVolumeFile) ProtoMessage() {} func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{38} + return fileDescriptor_83c10c24ec417dc9, []int{39} } func (m *DownwardAPIVolumeFile) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1144,7 +1172,7 @@ var xxx_messageInfo_DownwardAPIVolumeFile proto.InternalMessageInfo func (m *DownwardAPIVolumeSource) Reset() { *m = DownwardAPIVolumeSource{} } func (*DownwardAPIVolumeSource) ProtoMessage() {} func (*DownwardAPIVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{39} + return fileDescriptor_83c10c24ec417dc9, []int{40} } func (m *DownwardAPIVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1172,7 +1200,7 @@ var xxx_messageInfo_DownwardAPIVolumeSource proto.InternalMessageInfo func (m *EmptyDirVolumeSource) Reset() { *m = EmptyDirVolumeSource{} } func (*EmptyDirVolumeSource) ProtoMessage() {} func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{40} + return fileDescriptor_83c10c24ec417dc9, []int{41} } func (m *EmptyDirVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1200,7 +1228,7 @@ var xxx_messageInfo_EmptyDirVolumeSource proto.InternalMessageInfo func (m *EndpointAddress) Reset() { *m = EndpointAddress{} } func (*EndpointAddress) ProtoMessage() {} func (*EndpointAddress) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{41} + return fileDescriptor_83c10c24ec417dc9, []int{42} } func (m *EndpointAddress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1228,7 +1256,7 @@ var xxx_messageInfo_EndpointAddress proto.InternalMessageInfo func (m *EndpointPort) Reset() { *m = EndpointPort{} } func (*EndpointPort) ProtoMessage() {} func (*EndpointPort) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{42} + return fileDescriptor_83c10c24ec417dc9, []int{43} } func (m *EndpointPort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1256,7 +1284,7 @@ var xxx_messageInfo_EndpointPort proto.InternalMessageInfo func (m *EndpointSubset) Reset() { *m = EndpointSubset{} } func (*EndpointSubset) ProtoMessage() {} func (*EndpointSubset) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{43} + return fileDescriptor_83c10c24ec417dc9, []int{44} } func (m *EndpointSubset) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1284,7 +1312,7 @@ var xxx_messageInfo_EndpointSubset proto.InternalMessageInfo func (m *Endpoints) Reset() { *m = Endpoints{} } func (*Endpoints) ProtoMessage() {} func (*Endpoints) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{44} + return fileDescriptor_83c10c24ec417dc9, []int{45} } func (m *Endpoints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1312,7 +1340,7 @@ var xxx_messageInfo_Endpoints proto.InternalMessageInfo func (m *EndpointsList) Reset() { *m = EndpointsList{} } func (*EndpointsList) ProtoMessage() {} func (*EndpointsList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{45} + return fileDescriptor_83c10c24ec417dc9, []int{46} } func (m *EndpointsList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1340,7 +1368,7 @@ var xxx_messageInfo_EndpointsList proto.InternalMessageInfo func (m *EnvFromSource) Reset() { *m = EnvFromSource{} } func 
(*EnvFromSource) ProtoMessage() {} func (*EnvFromSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{46} + return fileDescriptor_83c10c24ec417dc9, []int{47} } func (m *EnvFromSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1368,7 +1396,7 @@ var xxx_messageInfo_EnvFromSource proto.InternalMessageInfo func (m *EnvVar) Reset() { *m = EnvVar{} } func (*EnvVar) ProtoMessage() {} func (*EnvVar) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{47} + return fileDescriptor_83c10c24ec417dc9, []int{48} } func (m *EnvVar) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1396,7 +1424,7 @@ var xxx_messageInfo_EnvVar proto.InternalMessageInfo func (m *EnvVarSource) Reset() { *m = EnvVarSource{} } func (*EnvVarSource) ProtoMessage() {} func (*EnvVarSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{48} + return fileDescriptor_83c10c24ec417dc9, []int{49} } func (m *EnvVarSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1424,7 +1452,7 @@ var xxx_messageInfo_EnvVarSource proto.InternalMessageInfo func (m *EphemeralContainer) Reset() { *m = EphemeralContainer{} } func (*EphemeralContainer) ProtoMessage() {} func (*EphemeralContainer) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{49} + return fileDescriptor_83c10c24ec417dc9, []int{50} } func (m *EphemeralContainer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1452,7 +1480,7 @@ var xxx_messageInfo_EphemeralContainer proto.InternalMessageInfo func (m *EphemeralContainerCommon) Reset() { *m = EphemeralContainerCommon{} } func (*EphemeralContainerCommon) ProtoMessage() {} func (*EphemeralContainerCommon) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{50} + return fileDescriptor_83c10c24ec417dc9, []int{51} } func (m *EphemeralContainerCommon) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1480,7 +1508,7 @@ var xxx_messageInfo_EphemeralContainerCommon proto.InternalMessageInfo func (m *EphemeralVolumeSource) Reset() { *m = EphemeralVolumeSource{} } func (*EphemeralVolumeSource) ProtoMessage() {} func (*EphemeralVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{51} + return fileDescriptor_83c10c24ec417dc9, []int{52} } func (m *EphemeralVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1508,7 +1536,7 @@ var xxx_messageInfo_EphemeralVolumeSource proto.InternalMessageInfo func (m *Event) Reset() { *m = Event{} } func (*Event) ProtoMessage() {} func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{52} + return fileDescriptor_83c10c24ec417dc9, []int{53} } func (m *Event) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1536,7 +1564,7 @@ var xxx_messageInfo_Event proto.InternalMessageInfo func (m *EventList) Reset() { *m = EventList{} } func (*EventList) ProtoMessage() {} func (*EventList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{53} + return fileDescriptor_83c10c24ec417dc9, []int{54} } func (m *EventList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1564,7 +1592,7 @@ var xxx_messageInfo_EventList proto.InternalMessageInfo func (m *EventSeries) Reset() { *m = EventSeries{} } func (*EventSeries) ProtoMessage() {} func (*EventSeries) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{54} + return fileDescriptor_83c10c24ec417dc9, []int{55} } func (m 
*EventSeries) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1592,7 +1620,7 @@ var xxx_messageInfo_EventSeries proto.InternalMessageInfo func (m *EventSource) Reset() { *m = EventSource{} } func (*EventSource) ProtoMessage() {} func (*EventSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{55} + return fileDescriptor_83c10c24ec417dc9, []int{56} } func (m *EventSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1620,7 +1648,7 @@ var xxx_messageInfo_EventSource proto.InternalMessageInfo func (m *ExecAction) Reset() { *m = ExecAction{} } func (*ExecAction) ProtoMessage() {} func (*ExecAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{56} + return fileDescriptor_83c10c24ec417dc9, []int{57} } func (m *ExecAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1648,7 +1676,7 @@ var xxx_messageInfo_ExecAction proto.InternalMessageInfo func (m *FCVolumeSource) Reset() { *m = FCVolumeSource{} } func (*FCVolumeSource) ProtoMessage() {} func (*FCVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{57} + return fileDescriptor_83c10c24ec417dc9, []int{58} } func (m *FCVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1676,7 +1704,7 @@ var xxx_messageInfo_FCVolumeSource proto.InternalMessageInfo func (m *FlexPersistentVolumeSource) Reset() { *m = FlexPersistentVolumeSource{} } func (*FlexPersistentVolumeSource) ProtoMessage() {} func (*FlexPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{58} + return fileDescriptor_83c10c24ec417dc9, []int{59} } func (m *FlexPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1704,7 +1732,7 @@ var xxx_messageInfo_FlexPersistentVolumeSource proto.InternalMessageInfo func (m *FlexVolumeSource) Reset() { *m = FlexVolumeSource{} } func (*FlexVolumeSource) ProtoMessage() {} func (*FlexVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{59} + return fileDescriptor_83c10c24ec417dc9, []int{60} } func (m *FlexVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1732,7 +1760,7 @@ var xxx_messageInfo_FlexVolumeSource proto.InternalMessageInfo func (m *FlockerVolumeSource) Reset() { *m = FlockerVolumeSource{} } func (*FlockerVolumeSource) ProtoMessage() {} func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{60} + return fileDescriptor_83c10c24ec417dc9, []int{61} } func (m *FlockerVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1760,7 +1788,7 @@ var xxx_messageInfo_FlockerVolumeSource proto.InternalMessageInfo func (m *GCEPersistentDiskVolumeSource) Reset() { *m = GCEPersistentDiskVolumeSource{} } func (*GCEPersistentDiskVolumeSource) ProtoMessage() {} func (*GCEPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{61} + return fileDescriptor_83c10c24ec417dc9, []int{62} } func (m *GCEPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1788,7 +1816,7 @@ var xxx_messageInfo_GCEPersistentDiskVolumeSource proto.InternalMessageInfo func (m *GRPCAction) Reset() { *m = GRPCAction{} } func (*GRPCAction) ProtoMessage() {} func (*GRPCAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{62} + return fileDescriptor_83c10c24ec417dc9, []int{63} } func (m *GRPCAction) XXX_Unmarshal(b []byte) 
error { return m.Unmarshal(b) @@ -1816,7 +1844,7 @@ var xxx_messageInfo_GRPCAction proto.InternalMessageInfo func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSource{} } func (*GitRepoVolumeSource) ProtoMessage() {} func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{63} + return fileDescriptor_83c10c24ec417dc9, []int{64} } func (m *GitRepoVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1844,7 +1872,7 @@ var xxx_messageInfo_GitRepoVolumeSource proto.InternalMessageInfo func (m *GlusterfsPersistentVolumeSource) Reset() { *m = GlusterfsPersistentVolumeSource{} } func (*GlusterfsPersistentVolumeSource) ProtoMessage() {} func (*GlusterfsPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{64} + return fileDescriptor_83c10c24ec417dc9, []int{65} } func (m *GlusterfsPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1872,7 +1900,7 @@ var xxx_messageInfo_GlusterfsPersistentVolumeSource proto.InternalMessageInfo func (m *GlusterfsVolumeSource) Reset() { *m = GlusterfsVolumeSource{} } func (*GlusterfsVolumeSource) ProtoMessage() {} func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{65} + return fileDescriptor_83c10c24ec417dc9, []int{66} } func (m *GlusterfsVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1900,7 +1928,7 @@ var xxx_messageInfo_GlusterfsVolumeSource proto.InternalMessageInfo func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} } func (*HTTPGetAction) ProtoMessage() {} func (*HTTPGetAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{66} + return fileDescriptor_83c10c24ec417dc9, []int{67} } func (m *HTTPGetAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1928,7 +1956,7 @@ var xxx_messageInfo_HTTPGetAction proto.InternalMessageInfo func (m *HTTPHeader) Reset() { *m = HTTPHeader{} } func (*HTTPHeader) ProtoMessage() {} func (*HTTPHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{67} + return fileDescriptor_83c10c24ec417dc9, []int{68} } func (m *HTTPHeader) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1956,7 +1984,7 @@ var xxx_messageInfo_HTTPHeader proto.InternalMessageInfo func (m *HostAlias) Reset() { *m = HostAlias{} } func (*HostAlias) ProtoMessage() {} func (*HostAlias) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{68} + return fileDescriptor_83c10c24ec417dc9, []int{69} } func (m *HostAlias) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1984,7 +2012,7 @@ var xxx_messageInfo_HostAlias proto.InternalMessageInfo func (m *HostIP) Reset() { *m = HostIP{} } func (*HostIP) ProtoMessage() {} func (*HostIP) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{69} + return fileDescriptor_83c10c24ec417dc9, []int{70} } func (m *HostIP) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2012,7 +2040,7 @@ var xxx_messageInfo_HostIP proto.InternalMessageInfo func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} } func (*HostPathVolumeSource) ProtoMessage() {} func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{70} + return fileDescriptor_83c10c24ec417dc9, []int{71} } func (m *HostPathVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2040,7 +2068,7 @@ var 
xxx_messageInfo_HostPathVolumeSource proto.InternalMessageInfo func (m *ISCSIPersistentVolumeSource) Reset() { *m = ISCSIPersistentVolumeSource{} } func (*ISCSIPersistentVolumeSource) ProtoMessage() {} func (*ISCSIPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{71} + return fileDescriptor_83c10c24ec417dc9, []int{72} } func (m *ISCSIPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2068,7 +2096,7 @@ var xxx_messageInfo_ISCSIPersistentVolumeSource proto.InternalMessageInfo func (m *ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} } func (*ISCSIVolumeSource) ProtoMessage() {} func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{72} + return fileDescriptor_83c10c24ec417dc9, []int{73} } func (m *ISCSIVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2096,7 +2124,7 @@ var xxx_messageInfo_ISCSIVolumeSource proto.InternalMessageInfo func (m *KeyToPath) Reset() { *m = KeyToPath{} } func (*KeyToPath) ProtoMessage() {} func (*KeyToPath) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{73} + return fileDescriptor_83c10c24ec417dc9, []int{74} } func (m *KeyToPath) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2124,7 +2152,7 @@ var xxx_messageInfo_KeyToPath proto.InternalMessageInfo func (m *Lifecycle) Reset() { *m = Lifecycle{} } func (*Lifecycle) ProtoMessage() {} func (*Lifecycle) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{74} + return fileDescriptor_83c10c24ec417dc9, []int{75} } func (m *Lifecycle) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2152,7 +2180,7 @@ var xxx_messageInfo_Lifecycle proto.InternalMessageInfo func (m *LifecycleHandler) Reset() { *m = LifecycleHandler{} } func (*LifecycleHandler) ProtoMessage() {} func (*LifecycleHandler) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{75} + return fileDescriptor_83c10c24ec417dc9, []int{76} } func (m *LifecycleHandler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2180,7 +2208,7 @@ var xxx_messageInfo_LifecycleHandler proto.InternalMessageInfo func (m *LimitRange) Reset() { *m = LimitRange{} } func (*LimitRange) ProtoMessage() {} func (*LimitRange) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{76} + return fileDescriptor_83c10c24ec417dc9, []int{77} } func (m *LimitRange) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2208,7 +2236,7 @@ var xxx_messageInfo_LimitRange proto.InternalMessageInfo func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} } func (*LimitRangeItem) ProtoMessage() {} func (*LimitRangeItem) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{77} + return fileDescriptor_83c10c24ec417dc9, []int{78} } func (m *LimitRangeItem) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2236,7 +2264,7 @@ var xxx_messageInfo_LimitRangeItem proto.InternalMessageInfo func (m *LimitRangeList) Reset() { *m = LimitRangeList{} } func (*LimitRangeList) ProtoMessage() {} func (*LimitRangeList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{78} + return fileDescriptor_83c10c24ec417dc9, []int{79} } func (m *LimitRangeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2264,7 +2292,7 @@ var xxx_messageInfo_LimitRangeList proto.InternalMessageInfo func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} } func (*LimitRangeSpec) 
ProtoMessage() {}
func (*LimitRangeSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{79}
+ return fileDescriptor_83c10c24ec417dc9, []int{80}
}
func (m *LimitRangeSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2292,7 +2320,7 @@ var xxx_messageInfo_LimitRangeSpec proto.InternalMessageInfo
func (m *List) Reset() { *m = List{} }
func (*List) ProtoMessage() {}
func (*List) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{80}
+ return fileDescriptor_83c10c24ec417dc9, []int{81}
}
func (m *List) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2320,7 +2348,7 @@ var xxx_messageInfo_List proto.InternalMessageInfo
func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} }
func (*LoadBalancerIngress) ProtoMessage() {}
func (*LoadBalancerIngress) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{81}
+ return fileDescriptor_83c10c24ec417dc9, []int{82}
}
func (m *LoadBalancerIngress) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2348,7 +2376,7 @@ var xxx_messageInfo_LoadBalancerIngress proto.InternalMessageInfo
func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} }
func (*LoadBalancerStatus) ProtoMessage() {}
func (*LoadBalancerStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{82}
+ return fileDescriptor_83c10c24ec417dc9, []int{83}
}
func (m *LoadBalancerStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2376,7 +2404,7 @@ var xxx_messageInfo_LoadBalancerStatus proto.InternalMessageInfo
func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} }
func (*LocalObjectReference) ProtoMessage() {}
func (*LocalObjectReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{83}
+ return fileDescriptor_83c10c24ec417dc9, []int{84}
}
func (m *LocalObjectReference) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2404,7 +2432,7 @@ var xxx_messageInfo_LocalObjectReference proto.InternalMessageInfo
func (m *LocalVolumeSource) Reset() { *m = LocalVolumeSource{} }
func (*LocalVolumeSource) ProtoMessage() {}
func (*LocalVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{84}
+ return fileDescriptor_83c10c24ec417dc9, []int{85}
}
func (m *LocalVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2429,10 +2457,38 @@ func (m *LocalVolumeSource) XXX_DiscardUnknown() {
var xxx_messageInfo_LocalVolumeSource proto.InternalMessageInfo
+func (m *ModifyVolumeStatus) Reset() { *m = ModifyVolumeStatus{} }
+func (*ModifyVolumeStatus) ProtoMessage() {}
+func (*ModifyVolumeStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_83c10c24ec417dc9, []int{86}
+}
+func (m *ModifyVolumeStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ModifyVolumeStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ModifyVolumeStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ModifyVolumeStatus.Merge(m, src)
+}
+func (m *ModifyVolumeStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *ModifyVolumeStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_ModifyVolumeStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ModifyVolumeStatus proto.InternalMessageInfo
+
func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} }
func (*NFSVolumeSource) ProtoMessage() {}
func
(*NFSVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{85} + return fileDescriptor_83c10c24ec417dc9, []int{87} } func (m *NFSVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2460,7 +2516,7 @@ var xxx_messageInfo_NFSVolumeSource proto.InternalMessageInfo func (m *Namespace) Reset() { *m = Namespace{} } func (*Namespace) ProtoMessage() {} func (*Namespace) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{86} + return fileDescriptor_83c10c24ec417dc9, []int{88} } func (m *Namespace) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2488,7 +2544,7 @@ var xxx_messageInfo_Namespace proto.InternalMessageInfo func (m *NamespaceCondition) Reset() { *m = NamespaceCondition{} } func (*NamespaceCondition) ProtoMessage() {} func (*NamespaceCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{87} + return fileDescriptor_83c10c24ec417dc9, []int{89} } func (m *NamespaceCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2516,7 +2572,7 @@ var xxx_messageInfo_NamespaceCondition proto.InternalMessageInfo func (m *NamespaceList) Reset() { *m = NamespaceList{} } func (*NamespaceList) ProtoMessage() {} func (*NamespaceList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{88} + return fileDescriptor_83c10c24ec417dc9, []int{90} } func (m *NamespaceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2544,7 +2600,7 @@ var xxx_messageInfo_NamespaceList proto.InternalMessageInfo func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} } func (*NamespaceSpec) ProtoMessage() {} func (*NamespaceSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{89} + return fileDescriptor_83c10c24ec417dc9, []int{91} } func (m *NamespaceSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2572,7 +2628,7 @@ var xxx_messageInfo_NamespaceSpec proto.InternalMessageInfo func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} } func (*NamespaceStatus) ProtoMessage() {} func (*NamespaceStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{90} + return fileDescriptor_83c10c24ec417dc9, []int{92} } func (m *NamespaceStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2600,7 +2656,7 @@ var xxx_messageInfo_NamespaceStatus proto.InternalMessageInfo func (m *Node) Reset() { *m = Node{} } func (*Node) ProtoMessage() {} func (*Node) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{91} + return fileDescriptor_83c10c24ec417dc9, []int{93} } func (m *Node) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2628,7 +2684,7 @@ var xxx_messageInfo_Node proto.InternalMessageInfo func (m *NodeAddress) Reset() { *m = NodeAddress{} } func (*NodeAddress) ProtoMessage() {} func (*NodeAddress) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{92} + return fileDescriptor_83c10c24ec417dc9, []int{94} } func (m *NodeAddress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2656,7 +2712,7 @@ var xxx_messageInfo_NodeAddress proto.InternalMessageInfo func (m *NodeAffinity) Reset() { *m = NodeAffinity{} } func (*NodeAffinity) ProtoMessage() {} func (*NodeAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{93} + return fileDescriptor_83c10c24ec417dc9, []int{95} } func (m *NodeAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2684,7 +2740,7 @@ var 
xxx_messageInfo_NodeAffinity proto.InternalMessageInfo func (m *NodeCondition) Reset() { *m = NodeCondition{} } func (*NodeCondition) ProtoMessage() {} func (*NodeCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{94} + return fileDescriptor_83c10c24ec417dc9, []int{96} } func (m *NodeCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2712,7 +2768,7 @@ var xxx_messageInfo_NodeCondition proto.InternalMessageInfo func (m *NodeConfigSource) Reset() { *m = NodeConfigSource{} } func (*NodeConfigSource) ProtoMessage() {} func (*NodeConfigSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{95} + return fileDescriptor_83c10c24ec417dc9, []int{97} } func (m *NodeConfigSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2740,7 +2796,7 @@ var xxx_messageInfo_NodeConfigSource proto.InternalMessageInfo func (m *NodeConfigStatus) Reset() { *m = NodeConfigStatus{} } func (*NodeConfigStatus) ProtoMessage() {} func (*NodeConfigStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{96} + return fileDescriptor_83c10c24ec417dc9, []int{98} } func (m *NodeConfigStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2768,7 +2824,7 @@ var xxx_messageInfo_NodeConfigStatus proto.InternalMessageInfo func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} } func (*NodeDaemonEndpoints) ProtoMessage() {} func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{97} + return fileDescriptor_83c10c24ec417dc9, []int{99} } func (m *NodeDaemonEndpoints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2796,7 +2852,7 @@ var xxx_messageInfo_NodeDaemonEndpoints proto.InternalMessageInfo func (m *NodeList) Reset() { *m = NodeList{} } func (*NodeList) ProtoMessage() {} func (*NodeList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{98} + return fileDescriptor_83c10c24ec417dc9, []int{100} } func (m *NodeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2824,7 +2880,7 @@ var xxx_messageInfo_NodeList proto.InternalMessageInfo func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} } func (*NodeProxyOptions) ProtoMessage() {} func (*NodeProxyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{99} + return fileDescriptor_83c10c24ec417dc9, []int{101} } func (m *NodeProxyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2852,7 +2908,7 @@ var xxx_messageInfo_NodeProxyOptions proto.InternalMessageInfo func (m *NodeResources) Reset() { *m = NodeResources{} } func (*NodeResources) ProtoMessage() {} func (*NodeResources) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{100} + return fileDescriptor_83c10c24ec417dc9, []int{102} } func (m *NodeResources) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2880,7 +2936,7 @@ var xxx_messageInfo_NodeResources proto.InternalMessageInfo func (m *NodeSelector) Reset() { *m = NodeSelector{} } func (*NodeSelector) ProtoMessage() {} func (*NodeSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{101} + return fileDescriptor_83c10c24ec417dc9, []int{103} } func (m *NodeSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2908,7 +2964,7 @@ var xxx_messageInfo_NodeSelector proto.InternalMessageInfo func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} } func (*NodeSelectorRequirement) 
ProtoMessage() {} func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{102} + return fileDescriptor_83c10c24ec417dc9, []int{104} } func (m *NodeSelectorRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2936,7 +2992,7 @@ var xxx_messageInfo_NodeSelectorRequirement proto.InternalMessageInfo func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} } func (*NodeSelectorTerm) ProtoMessage() {} func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{103} + return fileDescriptor_83c10c24ec417dc9, []int{105} } func (m *NodeSelectorTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2964,7 +3020,7 @@ var xxx_messageInfo_NodeSelectorTerm proto.InternalMessageInfo func (m *NodeSpec) Reset() { *m = NodeSpec{} } func (*NodeSpec) ProtoMessage() {} func (*NodeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{104} + return fileDescriptor_83c10c24ec417dc9, []int{106} } func (m *NodeSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2992,7 +3048,7 @@ var xxx_messageInfo_NodeSpec proto.InternalMessageInfo func (m *NodeStatus) Reset() { *m = NodeStatus{} } func (*NodeStatus) ProtoMessage() {} func (*NodeStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{105} + return fileDescriptor_83c10c24ec417dc9, []int{107} } func (m *NodeStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3020,7 +3076,7 @@ var xxx_messageInfo_NodeStatus proto.InternalMessageInfo func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} } func (*NodeSystemInfo) ProtoMessage() {} func (*NodeSystemInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{106} + return fileDescriptor_83c10c24ec417dc9, []int{108} } func (m *NodeSystemInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3048,7 +3104,7 @@ var xxx_messageInfo_NodeSystemInfo proto.InternalMessageInfo func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} } func (*ObjectFieldSelector) ProtoMessage() {} func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{107} + return fileDescriptor_83c10c24ec417dc9, []int{109} } func (m *ObjectFieldSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3076,7 +3132,7 @@ var xxx_messageInfo_ObjectFieldSelector proto.InternalMessageInfo func (m *ObjectReference) Reset() { *m = ObjectReference{} } func (*ObjectReference) ProtoMessage() {} func (*ObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{108} + return fileDescriptor_83c10c24ec417dc9, []int{110} } func (m *ObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3104,7 +3160,7 @@ var xxx_messageInfo_ObjectReference proto.InternalMessageInfo func (m *PersistentVolume) Reset() { *m = PersistentVolume{} } func (*PersistentVolume) ProtoMessage() {} func (*PersistentVolume) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{109} + return fileDescriptor_83c10c24ec417dc9, []int{111} } func (m *PersistentVolume) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3132,7 +3188,7 @@ var xxx_messageInfo_PersistentVolume proto.InternalMessageInfo func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} } func (*PersistentVolumeClaim) ProtoMessage() {} func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { - return 
fileDescriptor_83c10c24ec417dc9, []int{110} + return fileDescriptor_83c10c24ec417dc9, []int{112} } func (m *PersistentVolumeClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3160,7 +3216,7 @@ var xxx_messageInfo_PersistentVolumeClaim proto.InternalMessageInfo func (m *PersistentVolumeClaimCondition) Reset() { *m = PersistentVolumeClaimCondition{} } func (*PersistentVolumeClaimCondition) ProtoMessage() {} func (*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{111} + return fileDescriptor_83c10c24ec417dc9, []int{113} } func (m *PersistentVolumeClaimCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3188,7 +3244,7 @@ var xxx_messageInfo_PersistentVolumeClaimCondition proto.InternalMessageInfo func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} } func (*PersistentVolumeClaimList) ProtoMessage() {} func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{112} + return fileDescriptor_83c10c24ec417dc9, []int{114} } func (m *PersistentVolumeClaimList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3216,7 +3272,7 @@ var xxx_messageInfo_PersistentVolumeClaimList proto.InternalMessageInfo func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} } func (*PersistentVolumeClaimSpec) ProtoMessage() {} func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{113} + return fileDescriptor_83c10c24ec417dc9, []int{115} } func (m *PersistentVolumeClaimSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3244,7 +3300,7 @@ var xxx_messageInfo_PersistentVolumeClaimSpec proto.InternalMessageInfo func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} } func (*PersistentVolumeClaimStatus) ProtoMessage() {} func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{114} + return fileDescriptor_83c10c24ec417dc9, []int{116} } func (m *PersistentVolumeClaimStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3272,7 +3328,7 @@ var xxx_messageInfo_PersistentVolumeClaimStatus proto.InternalMessageInfo func (m *PersistentVolumeClaimTemplate) Reset() { *m = PersistentVolumeClaimTemplate{} } func (*PersistentVolumeClaimTemplate) ProtoMessage() {} func (*PersistentVolumeClaimTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{115} + return fileDescriptor_83c10c24ec417dc9, []int{117} } func (m *PersistentVolumeClaimTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3300,7 +3356,7 @@ var xxx_messageInfo_PersistentVolumeClaimTemplate proto.InternalMessageInfo func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} } func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {} func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{116} + return fileDescriptor_83c10c24ec417dc9, []int{118} } func (m *PersistentVolumeClaimVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3328,7 +3384,7 @@ var xxx_messageInfo_PersistentVolumeClaimVolumeSource proto.InternalMessageInfo func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} } func (*PersistentVolumeList) ProtoMessage() {} func (*PersistentVolumeList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, 
[]int{117} + return fileDescriptor_83c10c24ec417dc9, []int{119} } func (m *PersistentVolumeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3356,7 +3412,7 @@ var xxx_messageInfo_PersistentVolumeList proto.InternalMessageInfo func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} } func (*PersistentVolumeSource) ProtoMessage() {} func (*PersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{118} + return fileDescriptor_83c10c24ec417dc9, []int{120} } func (m *PersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3384,7 +3440,7 @@ var xxx_messageInfo_PersistentVolumeSource proto.InternalMessageInfo func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} } func (*PersistentVolumeSpec) ProtoMessage() {} func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{119} + return fileDescriptor_83c10c24ec417dc9, []int{121} } func (m *PersistentVolumeSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3412,7 +3468,7 @@ var xxx_messageInfo_PersistentVolumeSpec proto.InternalMessageInfo func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} } func (*PersistentVolumeStatus) ProtoMessage() {} func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{120} + return fileDescriptor_83c10c24ec417dc9, []int{122} } func (m *PersistentVolumeStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3440,7 +3496,7 @@ var xxx_messageInfo_PersistentVolumeStatus proto.InternalMessageInfo func (m *PhotonPersistentDiskVolumeSource) Reset() { *m = PhotonPersistentDiskVolumeSource{} } func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {} func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{121} + return fileDescriptor_83c10c24ec417dc9, []int{123} } func (m *PhotonPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3468,7 +3524,7 @@ var xxx_messageInfo_PhotonPersistentDiskVolumeSource proto.InternalMessageInfo func (m *Pod) Reset() { *m = Pod{} } func (*Pod) ProtoMessage() {} func (*Pod) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{122} + return fileDescriptor_83c10c24ec417dc9, []int{124} } func (m *Pod) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3496,7 +3552,7 @@ var xxx_messageInfo_Pod proto.InternalMessageInfo func (m *PodAffinity) Reset() { *m = PodAffinity{} } func (*PodAffinity) ProtoMessage() {} func (*PodAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{123} + return fileDescriptor_83c10c24ec417dc9, []int{125} } func (m *PodAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3524,7 +3580,7 @@ var xxx_messageInfo_PodAffinity proto.InternalMessageInfo func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} } func (*PodAffinityTerm) ProtoMessage() {} func (*PodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{124} + return fileDescriptor_83c10c24ec417dc9, []int{126} } func (m *PodAffinityTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3552,7 +3608,7 @@ var xxx_messageInfo_PodAffinityTerm proto.InternalMessageInfo func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} } func (*PodAntiAffinity) ProtoMessage() {} func (*PodAntiAffinity) Descriptor() ([]byte, []int) { - return 
fileDescriptor_83c10c24ec417dc9, []int{125} + return fileDescriptor_83c10c24ec417dc9, []int{127} } func (m *PodAntiAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3580,7 +3636,7 @@ var xxx_messageInfo_PodAntiAffinity proto.InternalMessageInfo func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} } func (*PodAttachOptions) ProtoMessage() {} func (*PodAttachOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{126} + return fileDescriptor_83c10c24ec417dc9, []int{128} } func (m *PodAttachOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3608,7 +3664,7 @@ var xxx_messageInfo_PodAttachOptions proto.InternalMessageInfo func (m *PodCondition) Reset() { *m = PodCondition{} } func (*PodCondition) ProtoMessage() {} func (*PodCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{127} + return fileDescriptor_83c10c24ec417dc9, []int{129} } func (m *PodCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3636,7 +3692,7 @@ var xxx_messageInfo_PodCondition proto.InternalMessageInfo func (m *PodDNSConfig) Reset() { *m = PodDNSConfig{} } func (*PodDNSConfig) ProtoMessage() {} func (*PodDNSConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{128} + return fileDescriptor_83c10c24ec417dc9, []int{130} } func (m *PodDNSConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3664,7 +3720,7 @@ var xxx_messageInfo_PodDNSConfig proto.InternalMessageInfo func (m *PodDNSConfigOption) Reset() { *m = PodDNSConfigOption{} } func (*PodDNSConfigOption) ProtoMessage() {} func (*PodDNSConfigOption) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{129} + return fileDescriptor_83c10c24ec417dc9, []int{131} } func (m *PodDNSConfigOption) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3692,7 +3748,7 @@ var xxx_messageInfo_PodDNSConfigOption proto.InternalMessageInfo func (m *PodExecOptions) Reset() { *m = PodExecOptions{} } func (*PodExecOptions) ProtoMessage() {} func (*PodExecOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{130} + return fileDescriptor_83c10c24ec417dc9, []int{132} } func (m *PodExecOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3720,7 +3776,7 @@ var xxx_messageInfo_PodExecOptions proto.InternalMessageInfo func (m *PodIP) Reset() { *m = PodIP{} } func (*PodIP) ProtoMessage() {} func (*PodIP) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{131} + return fileDescriptor_83c10c24ec417dc9, []int{133} } func (m *PodIP) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3748,7 +3804,7 @@ var xxx_messageInfo_PodIP proto.InternalMessageInfo func (m *PodList) Reset() { *m = PodList{} } func (*PodList) ProtoMessage() {} func (*PodList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{132} + return fileDescriptor_83c10c24ec417dc9, []int{134} } func (m *PodList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3776,7 +3832,7 @@ var xxx_messageInfo_PodList proto.InternalMessageInfo func (m *PodLogOptions) Reset() { *m = PodLogOptions{} } func (*PodLogOptions) ProtoMessage() {} func (*PodLogOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{133} + return fileDescriptor_83c10c24ec417dc9, []int{135} } func (m *PodLogOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3804,7 +3860,7 @@ var xxx_messageInfo_PodLogOptions 
proto.InternalMessageInfo func (m *PodOS) Reset() { *m = PodOS{} } func (*PodOS) ProtoMessage() {} func (*PodOS) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{134} + return fileDescriptor_83c10c24ec417dc9, []int{136} } func (m *PodOS) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3832,7 +3888,7 @@ var xxx_messageInfo_PodOS proto.InternalMessageInfo func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} } func (*PodPortForwardOptions) ProtoMessage() {} func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{135} + return fileDescriptor_83c10c24ec417dc9, []int{137} } func (m *PodPortForwardOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3860,7 +3916,7 @@ var xxx_messageInfo_PodPortForwardOptions proto.InternalMessageInfo func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} } func (*PodProxyOptions) ProtoMessage() {} func (*PodProxyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{136} + return fileDescriptor_83c10c24ec417dc9, []int{138} } func (m *PodProxyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3888,7 +3944,7 @@ var xxx_messageInfo_PodProxyOptions proto.InternalMessageInfo func (m *PodReadinessGate) Reset() { *m = PodReadinessGate{} } func (*PodReadinessGate) ProtoMessage() {} func (*PodReadinessGate) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{137} + return fileDescriptor_83c10c24ec417dc9, []int{139} } func (m *PodReadinessGate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3916,7 +3972,7 @@ var xxx_messageInfo_PodReadinessGate proto.InternalMessageInfo func (m *PodResourceClaim) Reset() { *m = PodResourceClaim{} } func (*PodResourceClaim) ProtoMessage() {} func (*PodResourceClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{138} + return fileDescriptor_83c10c24ec417dc9, []int{140} } func (m *PodResourceClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3944,7 +4000,7 @@ var xxx_messageInfo_PodResourceClaim proto.InternalMessageInfo func (m *PodResourceClaimStatus) Reset() { *m = PodResourceClaimStatus{} } func (*PodResourceClaimStatus) ProtoMessage() {} func (*PodResourceClaimStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{139} + return fileDescriptor_83c10c24ec417dc9, []int{141} } func (m *PodResourceClaimStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3972,7 +4028,7 @@ var xxx_messageInfo_PodResourceClaimStatus proto.InternalMessageInfo func (m *PodSchedulingGate) Reset() { *m = PodSchedulingGate{} } func (*PodSchedulingGate) ProtoMessage() {} func (*PodSchedulingGate) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{140} + return fileDescriptor_83c10c24ec417dc9, []int{142} } func (m *PodSchedulingGate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4000,7 +4056,7 @@ var xxx_messageInfo_PodSchedulingGate proto.InternalMessageInfo func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} } func (*PodSecurityContext) ProtoMessage() {} func (*PodSecurityContext) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{141} + return fileDescriptor_83c10c24ec417dc9, []int{143} } func (m *PodSecurityContext) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4028,7 +4084,7 @@ var xxx_messageInfo_PodSecurityContext proto.InternalMessageInfo func (m *PodSignature) 
Reset() { *m = PodSignature{} } func (*PodSignature) ProtoMessage() {} func (*PodSignature) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{142} + return fileDescriptor_83c10c24ec417dc9, []int{144} } func (m *PodSignature) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4056,7 +4112,7 @@ var xxx_messageInfo_PodSignature proto.InternalMessageInfo func (m *PodSpec) Reset() { *m = PodSpec{} } func (*PodSpec) ProtoMessage() {} func (*PodSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{143} + return fileDescriptor_83c10c24ec417dc9, []int{145} } func (m *PodSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4084,7 +4140,7 @@ var xxx_messageInfo_PodSpec proto.InternalMessageInfo func (m *PodStatus) Reset() { *m = PodStatus{} } func (*PodStatus) ProtoMessage() {} func (*PodStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{144} + return fileDescriptor_83c10c24ec417dc9, []int{146} } func (m *PodStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4112,7 +4168,7 @@ var xxx_messageInfo_PodStatus proto.InternalMessageInfo func (m *PodStatusResult) Reset() { *m = PodStatusResult{} } func (*PodStatusResult) ProtoMessage() {} func (*PodStatusResult) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{145} + return fileDescriptor_83c10c24ec417dc9, []int{147} } func (m *PodStatusResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4140,7 +4196,7 @@ var xxx_messageInfo_PodStatusResult proto.InternalMessageInfo func (m *PodTemplate) Reset() { *m = PodTemplate{} } func (*PodTemplate) ProtoMessage() {} func (*PodTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{146} + return fileDescriptor_83c10c24ec417dc9, []int{148} } func (m *PodTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4168,7 +4224,7 @@ var xxx_messageInfo_PodTemplate proto.InternalMessageInfo func (m *PodTemplateList) Reset() { *m = PodTemplateList{} } func (*PodTemplateList) ProtoMessage() {} func (*PodTemplateList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{147} + return fileDescriptor_83c10c24ec417dc9, []int{149} } func (m *PodTemplateList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4196,7 +4252,7 @@ var xxx_messageInfo_PodTemplateList proto.InternalMessageInfo func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} } func (*PodTemplateSpec) ProtoMessage() {} func (*PodTemplateSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{148} + return fileDescriptor_83c10c24ec417dc9, []int{150} } func (m *PodTemplateSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4224,7 +4280,7 @@ var xxx_messageInfo_PodTemplateSpec proto.InternalMessageInfo func (m *PortStatus) Reset() { *m = PortStatus{} } func (*PortStatus) ProtoMessage() {} func (*PortStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{149} + return fileDescriptor_83c10c24ec417dc9, []int{151} } func (m *PortStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4252,7 +4308,7 @@ var xxx_messageInfo_PortStatus proto.InternalMessageInfo func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} } func (*PortworxVolumeSource) ProtoMessage() {} func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{150} + return fileDescriptor_83c10c24ec417dc9, []int{152} } func (m 
*PortworxVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4280,7 +4336,7 @@ var xxx_messageInfo_PortworxVolumeSource proto.InternalMessageInfo func (m *Preconditions) Reset() { *m = Preconditions{} } func (*Preconditions) ProtoMessage() {} func (*Preconditions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{151} + return fileDescriptor_83c10c24ec417dc9, []int{153} } func (m *Preconditions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4308,7 +4364,7 @@ var xxx_messageInfo_Preconditions proto.InternalMessageInfo func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} } func (*PreferAvoidPodsEntry) ProtoMessage() {} func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{152} + return fileDescriptor_83c10c24ec417dc9, []int{154} } func (m *PreferAvoidPodsEntry) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4336,7 +4392,7 @@ var xxx_messageInfo_PreferAvoidPodsEntry proto.InternalMessageInfo func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} } func (*PreferredSchedulingTerm) ProtoMessage() {} func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{153} + return fileDescriptor_83c10c24ec417dc9, []int{155} } func (m *PreferredSchedulingTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4364,7 +4420,7 @@ var xxx_messageInfo_PreferredSchedulingTerm proto.InternalMessageInfo func (m *Probe) Reset() { *m = Probe{} } func (*Probe) ProtoMessage() {} func (*Probe) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{154} + return fileDescriptor_83c10c24ec417dc9, []int{156} } func (m *Probe) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4392,7 +4448,7 @@ var xxx_messageInfo_Probe proto.InternalMessageInfo func (m *ProbeHandler) Reset() { *m = ProbeHandler{} } func (*ProbeHandler) ProtoMessage() {} func (*ProbeHandler) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{155} + return fileDescriptor_83c10c24ec417dc9, []int{157} } func (m *ProbeHandler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4420,7 +4476,7 @@ var xxx_messageInfo_ProbeHandler proto.InternalMessageInfo func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} } func (*ProjectedVolumeSource) ProtoMessage() {} func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{156} + return fileDescriptor_83c10c24ec417dc9, []int{158} } func (m *ProjectedVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4448,7 +4504,7 @@ var xxx_messageInfo_ProjectedVolumeSource proto.InternalMessageInfo func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} } func (*QuobyteVolumeSource) ProtoMessage() {} func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{157} + return fileDescriptor_83c10c24ec417dc9, []int{159} } func (m *QuobyteVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4476,7 +4532,7 @@ var xxx_messageInfo_QuobyteVolumeSource proto.InternalMessageInfo func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} } func (*RBDPersistentVolumeSource) ProtoMessage() {} func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{158} + return fileDescriptor_83c10c24ec417dc9, []int{160} } func (m 
*RBDPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4504,7 +4560,7 @@ var xxx_messageInfo_RBDPersistentVolumeSource proto.InternalMessageInfo func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} } func (*RBDVolumeSource) ProtoMessage() {} func (*RBDVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{159} + return fileDescriptor_83c10c24ec417dc9, []int{161} } func (m *RBDVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4532,7 +4588,7 @@ var xxx_messageInfo_RBDVolumeSource proto.InternalMessageInfo func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } func (*RangeAllocation) ProtoMessage() {} func (*RangeAllocation) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{160} + return fileDescriptor_83c10c24ec417dc9, []int{162} } func (m *RangeAllocation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4560,7 +4616,7 @@ var xxx_messageInfo_RangeAllocation proto.InternalMessageInfo func (m *ReplicationController) Reset() { *m = ReplicationController{} } func (*ReplicationController) ProtoMessage() {} func (*ReplicationController) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{161} + return fileDescriptor_83c10c24ec417dc9, []int{163} } func (m *ReplicationController) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4588,7 +4644,7 @@ var xxx_messageInfo_ReplicationController proto.InternalMessageInfo func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} } func (*ReplicationControllerCondition) ProtoMessage() {} func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{162} + return fileDescriptor_83c10c24ec417dc9, []int{164} } func (m *ReplicationControllerCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4616,7 +4672,7 @@ var xxx_messageInfo_ReplicationControllerCondition proto.InternalMessageInfo func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} } func (*ReplicationControllerList) ProtoMessage() {} func (*ReplicationControllerList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{163} + return fileDescriptor_83c10c24ec417dc9, []int{165} } func (m *ReplicationControllerList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4644,7 +4700,7 @@ var xxx_messageInfo_ReplicationControllerList proto.InternalMessageInfo func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} } func (*ReplicationControllerSpec) ProtoMessage() {} func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{164} + return fileDescriptor_83c10c24ec417dc9, []int{166} } func (m *ReplicationControllerSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4672,7 +4728,7 @@ var xxx_messageInfo_ReplicationControllerSpec proto.InternalMessageInfo func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} } func (*ReplicationControllerStatus) ProtoMessage() {} func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{165} + return fileDescriptor_83c10c24ec417dc9, []int{167} } func (m *ReplicationControllerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4700,7 +4756,7 @@ var xxx_messageInfo_ReplicationControllerStatus proto.InternalMessageInfo func (m *ResourceClaim) Reset() { *m = 
ResourceClaim{} } func (*ResourceClaim) ProtoMessage() {} func (*ResourceClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{166} + return fileDescriptor_83c10c24ec417dc9, []int{168} } func (m *ResourceClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4728,7 +4784,7 @@ var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} } func (*ResourceFieldSelector) ProtoMessage() {} func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{167} + return fileDescriptor_83c10c24ec417dc9, []int{169} } func (m *ResourceFieldSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4756,7 +4812,7 @@ var xxx_messageInfo_ResourceFieldSelector proto.InternalMessageInfo func (m *ResourceQuota) Reset() { *m = ResourceQuota{} } func (*ResourceQuota) ProtoMessage() {} func (*ResourceQuota) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{168} + return fileDescriptor_83c10c24ec417dc9, []int{170} } func (m *ResourceQuota) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4784,7 +4840,7 @@ var xxx_messageInfo_ResourceQuota proto.InternalMessageInfo func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} } func (*ResourceQuotaList) ProtoMessage() {} func (*ResourceQuotaList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{169} + return fileDescriptor_83c10c24ec417dc9, []int{171} } func (m *ResourceQuotaList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4812,7 +4868,7 @@ var xxx_messageInfo_ResourceQuotaList proto.InternalMessageInfo func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} } func (*ResourceQuotaSpec) ProtoMessage() {} func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{170} + return fileDescriptor_83c10c24ec417dc9, []int{172} } func (m *ResourceQuotaSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4840,7 +4896,7 @@ var xxx_messageInfo_ResourceQuotaSpec proto.InternalMessageInfo func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} } func (*ResourceQuotaStatus) ProtoMessage() {} func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{171} + return fileDescriptor_83c10c24ec417dc9, []int{173} } func (m *ResourceQuotaStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4868,7 +4924,7 @@ var xxx_messageInfo_ResourceQuotaStatus proto.InternalMessageInfo func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } func (*ResourceRequirements) ProtoMessage() {} func (*ResourceRequirements) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{172} + return fileDescriptor_83c10c24ec417dc9, []int{174} } func (m *ResourceRequirements) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4896,7 +4952,7 @@ var xxx_messageInfo_ResourceRequirements proto.InternalMessageInfo func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} } func (*SELinuxOptions) ProtoMessage() {} func (*SELinuxOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{173} + return fileDescriptor_83c10c24ec417dc9, []int{175} } func (m *SELinuxOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4924,7 +4980,7 @@ var xxx_messageInfo_SELinuxOptions proto.InternalMessageInfo func (m *ScaleIOPersistentVolumeSource) Reset() { *m = 
ScaleIOPersistentVolumeSource{} } func (*ScaleIOPersistentVolumeSource) ProtoMessage() {} func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{174} + return fileDescriptor_83c10c24ec417dc9, []int{176} } func (m *ScaleIOPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4952,7 +5008,7 @@ var xxx_messageInfo_ScaleIOPersistentVolumeSource proto.InternalMessageInfo func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} } func (*ScaleIOVolumeSource) ProtoMessage() {} func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{175} + return fileDescriptor_83c10c24ec417dc9, []int{177} } func (m *ScaleIOVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4980,7 +5036,7 @@ var xxx_messageInfo_ScaleIOVolumeSource proto.InternalMessageInfo func (m *ScopeSelector) Reset() { *m = ScopeSelector{} } func (*ScopeSelector) ProtoMessage() {} func (*ScopeSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{176} + return fileDescriptor_83c10c24ec417dc9, []int{178} } func (m *ScopeSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5008,7 +5064,7 @@ var xxx_messageInfo_ScopeSelector proto.InternalMessageInfo func (m *ScopedResourceSelectorRequirement) Reset() { *m = ScopedResourceSelectorRequirement{} } func (*ScopedResourceSelectorRequirement) ProtoMessage() {} func (*ScopedResourceSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{177} + return fileDescriptor_83c10c24ec417dc9, []int{179} } func (m *ScopedResourceSelectorRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5036,7 +5092,7 @@ var xxx_messageInfo_ScopedResourceSelectorRequirement proto.InternalMessageInfo func (m *SeccompProfile) Reset() { *m = SeccompProfile{} } func (*SeccompProfile) ProtoMessage() {} func (*SeccompProfile) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{178} + return fileDescriptor_83c10c24ec417dc9, []int{180} } func (m *SeccompProfile) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5064,7 +5120,7 @@ var xxx_messageInfo_SeccompProfile proto.InternalMessageInfo func (m *Secret) Reset() { *m = Secret{} } func (*Secret) ProtoMessage() {} func (*Secret) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{179} + return fileDescriptor_83c10c24ec417dc9, []int{181} } func (m *Secret) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5092,7 +5148,7 @@ var xxx_messageInfo_Secret proto.InternalMessageInfo func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} } func (*SecretEnvSource) ProtoMessage() {} func (*SecretEnvSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{180} + return fileDescriptor_83c10c24ec417dc9, []int{182} } func (m *SecretEnvSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5120,7 +5176,7 @@ var xxx_messageInfo_SecretEnvSource proto.InternalMessageInfo func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} } func (*SecretKeySelector) ProtoMessage() {} func (*SecretKeySelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{181} + return fileDescriptor_83c10c24ec417dc9, []int{183} } func (m *SecretKeySelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5148,7 +5204,7 @@ var xxx_messageInfo_SecretKeySelector proto.InternalMessageInfo func (m 
*SecretList) Reset() { *m = SecretList{} } func (*SecretList) ProtoMessage() {} func (*SecretList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{182} + return fileDescriptor_83c10c24ec417dc9, []int{184} } func (m *SecretList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5176,7 +5232,7 @@ var xxx_messageInfo_SecretList proto.InternalMessageInfo func (m *SecretProjection) Reset() { *m = SecretProjection{} } func (*SecretProjection) ProtoMessage() {} func (*SecretProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{183} + return fileDescriptor_83c10c24ec417dc9, []int{185} } func (m *SecretProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5204,7 +5260,7 @@ var xxx_messageInfo_SecretProjection proto.InternalMessageInfo func (m *SecretReference) Reset() { *m = SecretReference{} } func (*SecretReference) ProtoMessage() {} func (*SecretReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{184} + return fileDescriptor_83c10c24ec417dc9, []int{186} } func (m *SecretReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5232,7 +5288,7 @@ var xxx_messageInfo_SecretReference proto.InternalMessageInfo func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} } func (*SecretVolumeSource) ProtoMessage() {} func (*SecretVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{185} + return fileDescriptor_83c10c24ec417dc9, []int{187} } func (m *SecretVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5260,7 +5316,7 @@ var xxx_messageInfo_SecretVolumeSource proto.InternalMessageInfo func (m *SecurityContext) Reset() { *m = SecurityContext{} } func (*SecurityContext) ProtoMessage() {} func (*SecurityContext) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{186} + return fileDescriptor_83c10c24ec417dc9, []int{188} } func (m *SecurityContext) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5288,7 +5344,7 @@ var xxx_messageInfo_SecurityContext proto.InternalMessageInfo func (m *SerializedReference) Reset() { *m = SerializedReference{} } func (*SerializedReference) ProtoMessage() {} func (*SerializedReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{187} + return fileDescriptor_83c10c24ec417dc9, []int{189} } func (m *SerializedReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5316,7 +5372,7 @@ var xxx_messageInfo_SerializedReference proto.InternalMessageInfo func (m *Service) Reset() { *m = Service{} } func (*Service) ProtoMessage() {} func (*Service) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{188} + return fileDescriptor_83c10c24ec417dc9, []int{190} } func (m *Service) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5344,7 +5400,7 @@ var xxx_messageInfo_Service proto.InternalMessageInfo func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } func (*ServiceAccount) ProtoMessage() {} func (*ServiceAccount) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{189} + return fileDescriptor_83c10c24ec417dc9, []int{191} } func (m *ServiceAccount) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5372,7 +5428,7 @@ var xxx_messageInfo_ServiceAccount proto.InternalMessageInfo func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} } func (*ServiceAccountList) ProtoMessage() {} func (*ServiceAccountList) Descriptor() 
([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{190}
+ return fileDescriptor_83c10c24ec417dc9, []int{192}
}
func (m *ServiceAccountList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5400,7 +5456,7 @@ var xxx_messageInfo_ServiceAccountList proto.InternalMessageInfo
func (m *ServiceAccountTokenProjection) Reset() { *m = ServiceAccountTokenProjection{} }
func (*ServiceAccountTokenProjection) ProtoMessage() {}
func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{191}
+ return fileDescriptor_83c10c24ec417dc9, []int{193}
}
func (m *ServiceAccountTokenProjection) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5428,7 +5484,7 @@ var xxx_messageInfo_ServiceAccountTokenProjection proto.InternalMessageInfo
func (m *ServiceList) Reset() { *m = ServiceList{} }
func (*ServiceList) ProtoMessage() {}
func (*ServiceList) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{192}
+ return fileDescriptor_83c10c24ec417dc9, []int{194}
}
func (m *ServiceList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5456,7 +5512,7 @@ var xxx_messageInfo_ServiceList proto.InternalMessageInfo
func (m *ServicePort) Reset() { *m = ServicePort{} }
func (*ServicePort) ProtoMessage() {}
func (*ServicePort) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{193}
+ return fileDescriptor_83c10c24ec417dc9, []int{195}
}
func (m *ServicePort) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5484,7 +5540,7 @@ var xxx_messageInfo_ServicePort proto.InternalMessageInfo
func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} }
func (*ServiceProxyOptions) ProtoMessage() {}
func (*ServiceProxyOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{194}
+ return fileDescriptor_83c10c24ec417dc9, []int{196}
}
func (m *ServiceProxyOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5512,7 +5568,7 @@ var xxx_messageInfo_ServiceProxyOptions proto.InternalMessageInfo
func (m *ServiceSpec) Reset() { *m = ServiceSpec{} }
func (*ServiceSpec) ProtoMessage() {}
func (*ServiceSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{195}
+ return fileDescriptor_83c10c24ec417dc9, []int{197}
}
func (m *ServiceSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5540,7 +5596,7 @@ var xxx_messageInfo_ServiceSpec proto.InternalMessageInfo
func (m *ServiceStatus) Reset() { *m = ServiceStatus{} }
func (*ServiceStatus) ProtoMessage() {}
func (*ServiceStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{196}
+ return fileDescriptor_83c10c24ec417dc9, []int{198}
}
func (m *ServiceStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5568,7 +5624,7 @@ var xxx_messageInfo_ServiceStatus proto.InternalMessageInfo
func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} }
func (*SessionAffinityConfig) ProtoMessage() {}
func (*SessionAffinityConfig) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{197}
+ return fileDescriptor_83c10c24ec417dc9, []int{199}
}
func (m *SessionAffinityConfig) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5593,10 +5649,38 @@ func (m *SessionAffinityConfig) XXX_DiscardUnknown() {
var xxx_messageInfo_SessionAffinityConfig proto.InternalMessageInfo
+func (m *SleepAction) Reset() { *m = SleepAction{} }
+func (*SleepAction) ProtoMessage() {}
+func (*SleepAction) Descriptor() ([]byte, []int) {
+ return fileDescriptor_83c10c24ec417dc9, []int{200}
+}
+func (m *SleepAction) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SleepAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SleepAction) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SleepAction.Merge(m, src)
+}
+func (m *SleepAction) XXX_Size() int {
+ return m.Size()
+}
+func (m *SleepAction) XXX_DiscardUnknown() {
+ xxx_messageInfo_SleepAction.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SleepAction proto.InternalMessageInfo
+
func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} }
func (*StorageOSPersistentVolumeSource) ProtoMessage() {}
func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{198}
+ return fileDescriptor_83c10c24ec417dc9, []int{201}
}
func (m *StorageOSPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5624,7 +5708,7 @@ var xxx_messageInfo_StorageOSPersistentVolumeSource proto.InternalMessageInfo
func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} }
func (*StorageOSVolumeSource) ProtoMessage() {}
func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{199}
+ return fileDescriptor_83c10c24ec417dc9, []int{202}
}
func (m *StorageOSVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5652,7 +5736,7 @@ var xxx_messageInfo_StorageOSVolumeSource proto.InternalMessageInfo
func (m *Sysctl) Reset() { *m = Sysctl{} }
func (*Sysctl) ProtoMessage() {}
func (*Sysctl) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{200}
+ return fileDescriptor_83c10c24ec417dc9, []int{203}
}
func (m *Sysctl) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5680,7 +5764,7 @@ var xxx_messageInfo_Sysctl proto.InternalMessageInfo
func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} }
func (*TCPSocketAction) ProtoMessage() {}
func (*TCPSocketAction) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{201}
+ return fileDescriptor_83c10c24ec417dc9, []int{204}
}
func (m *TCPSocketAction) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5708,7 +5792,7 @@ var xxx_messageInfo_TCPSocketAction proto.InternalMessageInfo
func (m *Taint) Reset() { *m = Taint{} }
func (*Taint) ProtoMessage() {}
func (*Taint) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{202}
+ return fileDescriptor_83c10c24ec417dc9, []int{205}
}
func (m *Taint) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5736,7 +5820,7 @@ var xxx_messageInfo_Taint proto.InternalMessageInfo
func (m *Toleration) Reset() { *m = Toleration{} }
func (*Toleration) ProtoMessage() {}
func (*Toleration) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{203}
+ return fileDescriptor_83c10c24ec417dc9, []int{206}
}
func (m *Toleration) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5764,7 +5848,7 @@ var xxx_messageInfo_Toleration proto.InternalMessageInfo
func (m *TopologySelectorLabelRequirement) Reset() { *m = TopologySelectorLabelRequirement{} }
func (*TopologySelectorLabelRequirement) ProtoMessage() {}
func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{204}
+ return
fileDescriptor_83c10c24ec417dc9, []int{207} } func (m *TopologySelectorLabelRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5792,7 +5876,7 @@ var xxx_messageInfo_TopologySelectorLabelRequirement proto.InternalMessageInfo func (m *TopologySelectorTerm) Reset() { *m = TopologySelectorTerm{} } func (*TopologySelectorTerm) ProtoMessage() {} func (*TopologySelectorTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{205} + return fileDescriptor_83c10c24ec417dc9, []int{208} } func (m *TopologySelectorTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5820,7 +5904,7 @@ var xxx_messageInfo_TopologySelectorTerm proto.InternalMessageInfo func (m *TopologySpreadConstraint) Reset() { *m = TopologySpreadConstraint{} } func (*TopologySpreadConstraint) ProtoMessage() {} func (*TopologySpreadConstraint) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{206} + return fileDescriptor_83c10c24ec417dc9, []int{209} } func (m *TopologySpreadConstraint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5848,7 +5932,7 @@ var xxx_messageInfo_TopologySpreadConstraint proto.InternalMessageInfo func (m *TypedLocalObjectReference) Reset() { *m = TypedLocalObjectReference{} } func (*TypedLocalObjectReference) ProtoMessage() {} func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{207} + return fileDescriptor_83c10c24ec417dc9, []int{210} } func (m *TypedLocalObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5876,7 +5960,7 @@ var xxx_messageInfo_TypedLocalObjectReference proto.InternalMessageInfo func (m *TypedObjectReference) Reset() { *m = TypedObjectReference{} } func (*TypedObjectReference) ProtoMessage() {} func (*TypedObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{208} + return fileDescriptor_83c10c24ec417dc9, []int{211} } func (m *TypedObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5904,7 +5988,7 @@ var xxx_messageInfo_TypedObjectReference proto.InternalMessageInfo func (m *Volume) Reset() { *m = Volume{} } func (*Volume) ProtoMessage() {} func (*Volume) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{209} + return fileDescriptor_83c10c24ec417dc9, []int{212} } func (m *Volume) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5932,7 +6016,7 @@ var xxx_messageInfo_Volume proto.InternalMessageInfo func (m *VolumeDevice) Reset() { *m = VolumeDevice{} } func (*VolumeDevice) ProtoMessage() {} func (*VolumeDevice) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{210} + return fileDescriptor_83c10c24ec417dc9, []int{213} } func (m *VolumeDevice) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5960,7 +6044,7 @@ var xxx_messageInfo_VolumeDevice proto.InternalMessageInfo func (m *VolumeMount) Reset() { *m = VolumeMount{} } func (*VolumeMount) ProtoMessage() {} func (*VolumeMount) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{211} + return fileDescriptor_83c10c24ec417dc9, []int{214} } func (m *VolumeMount) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5988,7 +6072,7 @@ var xxx_messageInfo_VolumeMount proto.InternalMessageInfo func (m *VolumeNodeAffinity) Reset() { *m = VolumeNodeAffinity{} } func (*VolumeNodeAffinity) ProtoMessage() {} func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{212} 
+ return fileDescriptor_83c10c24ec417dc9, []int{215} } func (m *VolumeNodeAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6016,7 +6100,7 @@ var xxx_messageInfo_VolumeNodeAffinity proto.InternalMessageInfo func (m *VolumeProjection) Reset() { *m = VolumeProjection{} } func (*VolumeProjection) ProtoMessage() {} func (*VolumeProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{213} + return fileDescriptor_83c10c24ec417dc9, []int{216} } func (m *VolumeProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6041,10 +6125,38 @@ func (m *VolumeProjection) XXX_DiscardUnknown() { var xxx_messageInfo_VolumeProjection proto.InternalMessageInfo +func (m *VolumeResourceRequirements) Reset() { *m = VolumeResourceRequirements{} } +func (*VolumeResourceRequirements) ProtoMessage() {} +func (*VolumeResourceRequirements) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{217} +} +func (m *VolumeResourceRequirements) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeResourceRequirements) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeResourceRequirements) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeResourceRequirements.Merge(m, src) +} +func (m *VolumeResourceRequirements) XXX_Size() int { + return m.Size() +} +func (m *VolumeResourceRequirements) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeResourceRequirements.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeResourceRequirements proto.InternalMessageInfo + func (m *VolumeSource) Reset() { *m = VolumeSource{} } func (*VolumeSource) ProtoMessage() {} func (*VolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{214} + return fileDescriptor_83c10c24ec417dc9, []int{218} } func (m *VolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6072,7 +6184,7 @@ var xxx_messageInfo_VolumeSource proto.InternalMessageInfo func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} } func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {} func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{215} + return fileDescriptor_83c10c24ec417dc9, []int{219} } func (m *VsphereVirtualDiskVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6100,7 +6212,7 @@ var xxx_messageInfo_VsphereVirtualDiskVolumeSource proto.InternalMessageInfo func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} } func (*WeightedPodAffinityTerm) ProtoMessage() {} func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{216} + return fileDescriptor_83c10c24ec417dc9, []int{220} } func (m *WeightedPodAffinityTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6128,7 +6240,7 @@ var xxx_messageInfo_WeightedPodAffinityTerm proto.InternalMessageInfo func (m *WindowsSecurityContextOptions) Reset() { *m = WindowsSecurityContextOptions{} } func (*WindowsSecurityContextOptions) ProtoMessage() {} func (*WindowsSecurityContextOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{217} + return fileDescriptor_83c10c24ec417dc9, []int{221} } func (m *WindowsSecurityContextOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6173,6 +6285,7 @@ func 
init() { proto.RegisterType((*CinderVolumeSource)(nil), "k8s.io.api.core.v1.CinderVolumeSource") proto.RegisterType((*ClaimSource)(nil), "k8s.io.api.core.v1.ClaimSource") proto.RegisterType((*ClientIPConfig)(nil), "k8s.io.api.core.v1.ClientIPConfig") + proto.RegisterType((*ClusterTrustBundleProjection)(nil), "k8s.io.api.core.v1.ClusterTrustBundleProjection") proto.RegisterType((*ComponentCondition)(nil), "k8s.io.api.core.v1.ComponentCondition") proto.RegisterType((*ComponentStatus)(nil), "k8s.io.api.core.v1.ComponentStatus") proto.RegisterType((*ComponentStatusList)(nil), "k8s.io.api.core.v1.ComponentStatusList") @@ -6251,6 +6364,7 @@ func init() { proto.RegisterType((*LoadBalancerStatus)(nil), "k8s.io.api.core.v1.LoadBalancerStatus") proto.RegisterType((*LocalObjectReference)(nil), "k8s.io.api.core.v1.LocalObjectReference") proto.RegisterType((*LocalVolumeSource)(nil), "k8s.io.api.core.v1.LocalVolumeSource") + proto.RegisterType((*ModifyVolumeStatus)(nil), "k8s.io.api.core.v1.ModifyVolumeStatus") proto.RegisterType((*NFSVolumeSource)(nil), "k8s.io.api.core.v1.NFSVolumeSource") proto.RegisterType((*Namespace)(nil), "k8s.io.api.core.v1.Namespace") proto.RegisterType((*NamespaceCondition)(nil), "k8s.io.api.core.v1.NamespaceCondition") @@ -6382,6 +6496,7 @@ func init() { proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.ServiceSpec.SelectorEntry") proto.RegisterType((*ServiceStatus)(nil), "k8s.io.api.core.v1.ServiceStatus") proto.RegisterType((*SessionAffinityConfig)(nil), "k8s.io.api.core.v1.SessionAffinityConfig") + proto.RegisterType((*SleepAction)(nil), "k8s.io.api.core.v1.SleepAction") proto.RegisterType((*StorageOSPersistentVolumeSource)(nil), "k8s.io.api.core.v1.StorageOSPersistentVolumeSource") proto.RegisterType((*StorageOSVolumeSource)(nil), "k8s.io.api.core.v1.StorageOSVolumeSource") proto.RegisterType((*Sysctl)(nil), "k8s.io.api.core.v1.Sysctl") @@ -6398,6 +6513,9 @@ func init() { proto.RegisterType((*VolumeMount)(nil), "k8s.io.api.core.v1.VolumeMount") proto.RegisterType((*VolumeNodeAffinity)(nil), "k8s.io.api.core.v1.VolumeNodeAffinity") proto.RegisterType((*VolumeProjection)(nil), "k8s.io.api.core.v1.VolumeProjection") + proto.RegisterType((*VolumeResourceRequirements)(nil), "k8s.io.api.core.v1.VolumeResourceRequirements") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.VolumeResourceRequirements.LimitsEntry") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.VolumeResourceRequirements.RequestsEntry") proto.RegisterType((*VolumeSource)(nil), "k8s.io.api.core.v1.VolumeSource") proto.RegisterType((*VsphereVirtualDiskVolumeSource)(nil), "k8s.io.api.core.v1.VsphereVirtualDiskVolumeSource") proto.RegisterType((*WeightedPodAffinityTerm)(nil), "k8s.io.api.core.v1.WeightedPodAffinityTerm") @@ -6409,934 +6527,974 @@ func init() { } var fileDescriptor_83c10c24ec417dc9 = []byte{ - // 14822 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x69, 0x70, 0x24, 0xc9, - 0x75, 0x18, 0xcc, 0xea, 0xc6, 0xd5, 0x0f, 0x77, 0x62, 0x0e, 0x0c, 0x76, 0x66, 0x7a, 0xb6, 0x76, - 0x77, 0x76, 0xf6, 0xc2, 0x70, 0xf6, 0x20, 0x97, 0xbb, 0xe4, 0x8a, 0x38, 0x67, 0xb0, 0x03, 0x60, - 0x7a, 0xb3, 0x31, 0x33, 0xe4, 0x72, 0xc9, 0x60, 0xa1, 0x3b, 0x01, 0x14, 0xd1, 0xa8, 0xea, 0xad, - 0xaa, 0xc6, 0x0c, 0xe6, 0x23, 0x43, 0x12, 0xf5, 0xe9, 0xa0, 0xa4, 0xef, 0x0b, 0xc6, 0x17, 0xfa, - 0x8e, 0xa0, 0x14, 0x8a, 0x2f, 0x24, 0x59, 0x87, 0x69, 0xd9, 0xa6, 0x29, 0x4b, 0xb2, 0xa8, 0xcb, - 0x57, 0x58, 0x72, 0x38, 0x64, 
0x59, 0x11, 0x16, 0x15, 0xa1, 0x30, 0x24, 0x8e, 0x1c, 0x21, 0x2b, - 0xc2, 0x96, 0xe4, 0xe3, 0x87, 0x0d, 0xcb, 0x96, 0x23, 0xcf, 0xca, 0xac, 0xa3, 0xbb, 0x31, 0x8b, - 0x01, 0x97, 0x8c, 0xfd, 0xd7, 0xfd, 0xde, 0xcb, 0x97, 0x59, 0x79, 0xbe, 0x7c, 0xef, 0xe5, 0x7b, - 0xf0, 0xea, 0xf6, 0xcb, 0xe1, 0xb4, 0xeb, 0x5f, 0xde, 0x6e, 0xad, 0x93, 0xc0, 0x23, 0x11, 0x09, - 0x2f, 0xef, 0x12, 0xaf, 0xee, 0x07, 0x97, 0x05, 0xc2, 0x69, 0xba, 0x97, 0x6b, 0x7e, 0x40, 0x2e, - 0xef, 0x5e, 0xb9, 0xbc, 0x49, 0x3c, 0x12, 0x38, 0x11, 0xa9, 0x4f, 0x37, 0x03, 0x3f, 0xf2, 0x11, - 0xe2, 0x34, 0xd3, 0x4e, 0xd3, 0x9d, 0xa6, 0x34, 0xd3, 0xbb, 0x57, 0xa6, 0x9e, 0xdb, 0x74, 0xa3, - 0xad, 0xd6, 0xfa, 0x74, 0xcd, 0xdf, 0xb9, 0xbc, 0xe9, 0x6f, 0xfa, 0x97, 0x19, 0xe9, 0x7a, 0x6b, - 0x83, 0xfd, 0x63, 0x7f, 0xd8, 0x2f, 0xce, 0x62, 0xea, 0xc5, 0xb8, 0x9a, 0x1d, 0xa7, 0xb6, 0xe5, - 0x7a, 0x24, 0xd8, 0xbb, 0xdc, 0xdc, 0xde, 0x64, 0xf5, 0x06, 0x24, 0xf4, 0x5b, 0x41, 0x8d, 0x24, - 0x2b, 0x6e, 0x5b, 0x2a, 0xbc, 0xbc, 0x43, 0x22, 0x27, 0xa3, 0xb9, 0x53, 0x97, 0xf3, 0x4a, 0x05, - 0x2d, 0x2f, 0x72, 0x77, 0xd2, 0xd5, 0x7c, 0xa0, 0x53, 0x81, 0xb0, 0xb6, 0x45, 0x76, 0x9c, 0x54, - 0xb9, 0x17, 0xf2, 0xca, 0xb5, 0x22, 0xb7, 0x71, 0xd9, 0xf5, 0xa2, 0x30, 0x0a, 0x92, 0x85, 0xec, - 0xaf, 0x5b, 0x70, 0x61, 0xe6, 0x76, 0x75, 0xa1, 0xe1, 0x84, 0x91, 0x5b, 0x9b, 0x6d, 0xf8, 0xb5, - 0xed, 0x6a, 0xe4, 0x07, 0xe4, 0x96, 0xdf, 0x68, 0xed, 0x90, 0x2a, 0xeb, 0x08, 0xf4, 0x2c, 0x0c, - 0xec, 0xb2, 0xff, 0x4b, 0xf3, 0x93, 0xd6, 0x05, 0xeb, 0x52, 0x69, 0x76, 0xec, 0xb7, 0xf6, 0xcb, - 0xef, 0xbb, 0xbf, 0x5f, 0x1e, 0xb8, 0x25, 0xe0, 0x58, 0x51, 0xa0, 0x8b, 0xd0, 0xb7, 0x11, 0xae, - 0xed, 0x35, 0xc9, 0x64, 0x81, 0xd1, 0x8e, 0x08, 0xda, 0xbe, 0xc5, 0x2a, 0x85, 0x62, 0x81, 0x45, - 0x97, 0xa1, 0xd4, 0x74, 0x82, 0xc8, 0x8d, 0x5c, 0xdf, 0x9b, 0x2c, 0x5e, 0xb0, 0x2e, 0xf5, 0xce, - 0x8e, 0x0b, 0xd2, 0x52, 0x45, 0x22, 0x70, 0x4c, 0x43, 0x9b, 0x11, 0x10, 0xa7, 0x7e, 0xc3, 0x6b, - 0xec, 0x4d, 0xf6, 0x5c, 0xb0, 0x2e, 0x0d, 0xc4, 0xcd, 0xc0, 0x02, 0x8e, 0x15, 0x85, 0xfd, 0xa5, - 0x02, 0x0c, 0xcc, 0x6c, 0x6c, 0xb8, 0x9e, 0x1b, 0xed, 0xa1, 0x5b, 0x30, 0xe4, 0xf9, 0x75, 0x22, - 0xff, 0xb3, 0xaf, 0x18, 0x7c, 0xfe, 0xc2, 0x74, 0x7a, 0x2a, 0x4d, 0xaf, 0x6a, 0x74, 0xb3, 0x63, - 0xf7, 0xf7, 0xcb, 0x43, 0x3a, 0x04, 0x1b, 0x7c, 0x10, 0x86, 0xc1, 0xa6, 0x5f, 0x57, 0x6c, 0x0b, - 0x8c, 0x6d, 0x39, 0x8b, 0x6d, 0x25, 0x26, 0x9b, 0x1d, 0xbd, 0xbf, 0x5f, 0x1e, 0xd4, 0x00, 0x58, - 0x67, 0x82, 0xd6, 0x61, 0x94, 0xfe, 0xf5, 0x22, 0x57, 0xf1, 0x2d, 0x32, 0xbe, 0x8f, 0xe5, 0xf1, - 0xd5, 0x48, 0x67, 0x27, 0xee, 0xef, 0x97, 0x47, 0x13, 0x40, 0x9c, 0x64, 0x68, 0xdf, 0x83, 0x91, - 0x99, 0x28, 0x72, 0x6a, 0x5b, 0xa4, 0xce, 0x47, 0x10, 0xbd, 0x08, 0x3d, 0x9e, 0xb3, 0x43, 0xc4, - 0xf8, 0x5e, 0x10, 0x1d, 0xdb, 0xb3, 0xea, 0xec, 0x90, 0x83, 0xfd, 0xf2, 0xd8, 0x4d, 0xcf, 0x7d, - 0xbb, 0x25, 0x66, 0x05, 0x85, 0x61, 0x46, 0x8d, 0x9e, 0x07, 0xa8, 0x93, 0x5d, 0xb7, 0x46, 0x2a, - 0x4e, 0xb4, 0x25, 0xc6, 0x1b, 0x89, 0xb2, 0x30, 0xaf, 0x30, 0x58, 0xa3, 0xb2, 0xef, 0x42, 0x69, - 0x66, 0xd7, 0x77, 0xeb, 0x15, 0xbf, 0x1e, 0xa2, 0x6d, 0x18, 0x6d, 0x06, 0x64, 0x83, 0x04, 0x0a, - 0x34, 0x69, 0x5d, 0x28, 0x5e, 0x1a, 0x7c, 0xfe, 0x52, 0xe6, 0xc7, 0x9a, 0xa4, 0x0b, 0x5e, 0x14, - 0xec, 0xcd, 0x9e, 0x16, 0xf5, 0x8d, 0x26, 0xb0, 0x38, 0xc9, 0xd9, 0xfe, 0x27, 0x05, 0x38, 0x39, - 0x73, 0xaf, 0x15, 0x90, 0x79, 0x37, 0xdc, 0x4e, 0xce, 0xf0, 0xba, 0x1b, 0x6e, 0xaf, 0xc6, 0x3d, - 0xa0, 0xa6, 0xd6, 0xbc, 0x80, 0x63, 0x45, 0x81, 0x9e, 0x83, 0x7e, 0xfa, 0xfb, 0x26, 0x5e, 0x12, - 0x9f, 0x3c, 0x21, 0x88, 0x07, 0xe7, 0x9d, 0xc8, 0x99, 
0xe7, 0x28, 0x2c, 0x69, 0xd0, 0x0a, 0x0c, - 0xd6, 0xd8, 0x82, 0xdc, 0x5c, 0xf1, 0xeb, 0x84, 0x0d, 0x66, 0x69, 0xf6, 0x19, 0x4a, 0x3e, 0x17, - 0x83, 0x0f, 0xf6, 0xcb, 0x93, 0xbc, 0x6d, 0x82, 0x85, 0x86, 0xc3, 0x7a, 0x79, 0x64, 0xab, 0xf5, - 0xd5, 0xc3, 0x38, 0x41, 0xc6, 0xda, 0xba, 0xa4, 0x2d, 0x95, 0x5e, 0xb6, 0x54, 0x86, 0xb2, 0x97, - 0x09, 0xba, 0x02, 0x3d, 0xdb, 0xae, 0x57, 0x9f, 0xec, 0x63, 0xbc, 0xce, 0xd1, 0x31, 0xbf, 0xee, - 0x7a, 0xf5, 0x83, 0xfd, 0xf2, 0xb8, 0xd1, 0x1c, 0x0a, 0xc4, 0x8c, 0xd4, 0xfe, 0xcf, 0x16, 0x94, - 0x19, 0x6e, 0xd1, 0x6d, 0x90, 0x0a, 0x09, 0x42, 0x37, 0x8c, 0x88, 0x17, 0x19, 0x1d, 0xfa, 0x3c, - 0x40, 0x48, 0x6a, 0x01, 0x89, 0xb4, 0x2e, 0x55, 0x13, 0xa3, 0xaa, 0x30, 0x58, 0xa3, 0xa2, 0x1b, - 0x42, 0xb8, 0xe5, 0x04, 0x6c, 0x7e, 0x89, 0x8e, 0x55, 0x1b, 0x42, 0x55, 0x22, 0x70, 0x4c, 0x63, - 0x6c, 0x08, 0xc5, 0x4e, 0x1b, 0x02, 0xfa, 0x08, 0x8c, 0xc6, 0x95, 0x85, 0x4d, 0xa7, 0x26, 0x3b, - 0x90, 0x2d, 0x99, 0xaa, 0x89, 0xc2, 0x49, 0x5a, 0xfb, 0x6f, 0x5a, 0x62, 0xf2, 0xd0, 0xaf, 0x7e, - 0x97, 0x7f, 0xab, 0xfd, 0xcb, 0x16, 0xf4, 0xcf, 0xba, 0x5e, 0xdd, 0xf5, 0x36, 0xd1, 0xa7, 0x61, - 0x80, 0x9e, 0x4d, 0x75, 0x27, 0x72, 0xc4, 0xbe, 0xf7, 0x7e, 0x6d, 0x6d, 0xa9, 0xa3, 0x62, 0xba, - 0xb9, 0xbd, 0x49, 0x01, 0xe1, 0x34, 0xa5, 0xa6, 0xab, 0xed, 0xc6, 0xfa, 0x67, 0x48, 0x2d, 0x5a, - 0x21, 0x91, 0x13, 0x7f, 0x4e, 0x0c, 0xc3, 0x8a, 0x2b, 0xba, 0x0e, 0x7d, 0x91, 0x13, 0x6c, 0x92, - 0x48, 0x6c, 0x80, 0x99, 0x1b, 0x15, 0x2f, 0x89, 0xe9, 0x8a, 0x24, 0x5e, 0x8d, 0xc4, 0xc7, 0xc2, - 0x1a, 0x2b, 0x8a, 0x05, 0x0b, 0xfb, 0x7f, 0xf4, 0xc3, 0x99, 0xb9, 0xea, 0x52, 0xce, 0xbc, 0xba, - 0x08, 0x7d, 0xf5, 0xc0, 0xdd, 0x25, 0x81, 0xe8, 0x67, 0xc5, 0x65, 0x9e, 0x41, 0xb1, 0xc0, 0xa2, - 0x97, 0x61, 0x88, 0x1f, 0x48, 0xd7, 0x1c, 0xaf, 0xde, 0x90, 0x5d, 0x7c, 0x42, 0x50, 0x0f, 0xdd, - 0xd2, 0x70, 0xd8, 0xa0, 0x3c, 0xe4, 0xa4, 0xba, 0x98, 0x58, 0x8c, 0x79, 0x87, 0xdd, 0x17, 0x2c, - 0x18, 0xe3, 0xd5, 0xcc, 0x44, 0x51, 0xe0, 0xae, 0xb7, 0x22, 0x12, 0x4e, 0xf6, 0xb2, 0x9d, 0x6e, - 0x2e, 0xab, 0xb7, 0x72, 0x7b, 0x60, 0xfa, 0x56, 0x82, 0x0b, 0xdf, 0x04, 0x27, 0x45, 0xbd, 0x63, - 0x49, 0x34, 0x4e, 0x55, 0x8b, 0xbe, 0xc7, 0x82, 0xa9, 0x9a, 0xef, 0x45, 0x81, 0xdf, 0x68, 0x90, - 0xa0, 0xd2, 0x5a, 0x6f, 0xb8, 0xe1, 0x16, 0x9f, 0xa7, 0x98, 0x6c, 0xb0, 0x9d, 0x20, 0x67, 0x0c, - 0x15, 0x91, 0x18, 0xc3, 0xf3, 0xf7, 0xf7, 0xcb, 0x53, 0x73, 0xb9, 0xac, 0x70, 0x9b, 0x6a, 0xd0, - 0x36, 0x20, 0x7a, 0x94, 0x56, 0x23, 0x67, 0x93, 0xc4, 0x95, 0xf7, 0x77, 0x5f, 0xf9, 0xa9, 0xfb, - 0xfb, 0x65, 0xb4, 0x9a, 0x62, 0x81, 0x33, 0xd8, 0xa2, 0xb7, 0xe1, 0x04, 0x85, 0xa6, 0xbe, 0x75, - 0xa0, 0xfb, 0xea, 0x26, 0xef, 0xef, 0x97, 0x4f, 0xac, 0x66, 0x30, 0xc1, 0x99, 0xac, 0xd1, 0x77, - 0x59, 0x70, 0x26, 0xfe, 0xfc, 0x85, 0xbb, 0x4d, 0xc7, 0xab, 0xc7, 0x15, 0x97, 0xba, 0xaf, 0x98, - 0xee, 0xc9, 0x67, 0xe6, 0xf2, 0x38, 0xe1, 0xfc, 0x4a, 0x90, 0x07, 0x13, 0xb4, 0x69, 0xc9, 0xba, - 0xa1, 0xfb, 0xba, 0x4f, 0xdf, 0xdf, 0x2f, 0x4f, 0xac, 0xa6, 0x79, 0xe0, 0x2c, 0xc6, 0x53, 0x73, - 0x70, 0x32, 0x73, 0x76, 0xa2, 0x31, 0x28, 0x6e, 0x13, 0x2e, 0x75, 0x95, 0x30, 0xfd, 0x89, 0x4e, - 0x40, 0xef, 0xae, 0xd3, 0x68, 0x89, 0x85, 0x89, 0xf9, 0x9f, 0x57, 0x0a, 0x2f, 0x5b, 0xf6, 0x3f, - 0x2d, 0xc2, 0xe8, 0x5c, 0x75, 0xe9, 0x81, 0x56, 0xbd, 0x7e, 0xec, 0x15, 0xda, 0x1e, 0x7b, 0xf1, - 0x21, 0x5a, 0xcc, 0x3d, 0x44, 0xbf, 0x33, 0x63, 0xc9, 0xf6, 0xb0, 0x25, 0xfb, 0xa1, 0x9c, 0x25, - 0x7b, 0xc4, 0x0b, 0x75, 0x37, 0x67, 0xd6, 0xf6, 0xb2, 0x01, 0xcc, 0x94, 0x90, 0x96, 0xfd, 0x9a, - 0xd3, 0x48, 0x6e, 0xb5, 0x87, 0x9c, 0xba, 0x47, 0x33, 0x8e, 0x35, 0x18, 0x9a, 
0x73, 0x9a, 0xce, - 0xba, 0xdb, 0x70, 0x23, 0x97, 0x84, 0xe8, 0x49, 0x28, 0x3a, 0xf5, 0x3a, 0x93, 0xee, 0x4a, 0xb3, - 0x27, 0xef, 0xef, 0x97, 0x8b, 0x33, 0x75, 0x2a, 0x66, 0x80, 0xa2, 0xda, 0xc3, 0x94, 0x02, 0x3d, - 0x0d, 0x3d, 0xf5, 0xc0, 0x6f, 0x4e, 0x16, 0x18, 0x25, 0x5d, 0xe5, 0x3d, 0xf3, 0x81, 0xdf, 0x4c, - 0x90, 0x32, 0x1a, 0xfb, 0x37, 0x0b, 0x70, 0x76, 0x8e, 0x34, 0xb7, 0x16, 0xab, 0x39, 0xe7, 0xc5, - 0x25, 0x18, 0xd8, 0xf1, 0x3d, 0x37, 0xf2, 0x83, 0x50, 0x54, 0xcd, 0x66, 0xc4, 0x8a, 0x80, 0x61, - 0x85, 0x45, 0x17, 0xa0, 0xa7, 0x19, 0x0b, 0xb1, 0x43, 0x52, 0x00, 0x66, 0xe2, 0x2b, 0xc3, 0x50, - 0x8a, 0x56, 0x48, 0x02, 0x31, 0x63, 0x14, 0xc5, 0xcd, 0x90, 0x04, 0x98, 0x61, 0x62, 0x49, 0x80, - 0xca, 0x08, 0xe2, 0x44, 0x48, 0x48, 0x02, 0x14, 0x83, 0x35, 0x2a, 0x54, 0x81, 0x52, 0x98, 0x18, - 0xd9, 0xae, 0x96, 0xe6, 0x30, 0x13, 0x15, 0xd4, 0x48, 0xc6, 0x4c, 0x8c, 0x13, 0xac, 0xaf, 0xa3, - 0xa8, 0xf0, 0xb5, 0x02, 0x20, 0xde, 0x85, 0xdf, 0x62, 0x1d, 0x77, 0x33, 0xdd, 0x71, 0xdd, 0x2f, - 0x89, 0xa3, 0xea, 0xbd, 0xff, 0x62, 0xc1, 0xd9, 0x39, 0xd7, 0xab, 0x93, 0x20, 0x67, 0x02, 0x3e, - 0x9c, 0xbb, 0xf3, 0xe1, 0x84, 0x14, 0x63, 0x8a, 0xf5, 0x1c, 0xc1, 0x14, 0xb3, 0xff, 0xc2, 0x02, - 0xc4, 0x3f, 0xfb, 0x5d, 0xf7, 0xb1, 0x37, 0xd3, 0x1f, 0x7b, 0x04, 0xd3, 0xc2, 0xfe, 0x3b, 0x16, - 0x0c, 0xce, 0x35, 0x1c, 0x77, 0x47, 0x7c, 0xea, 0x1c, 0x8c, 0x4b, 0x45, 0x11, 0x03, 0x6b, 0xb2, - 0x3f, 0xdd, 0xdc, 0xc6, 0x71, 0x12, 0x89, 0xd3, 0xf4, 0xe8, 0x13, 0x70, 0xc6, 0x00, 0xae, 0x91, - 0x9d, 0x66, 0xc3, 0x89, 0xf4, 0x5b, 0x01, 0x3b, 0xfd, 0x71, 0x1e, 0x11, 0xce, 0x2f, 0x6f, 0x2f, - 0xc3, 0xc8, 0x5c, 0xc3, 0x25, 0x5e, 0xb4, 0x54, 0x99, 0xf3, 0xbd, 0x0d, 0x77, 0x13, 0xbd, 0x02, - 0x23, 0x91, 0xbb, 0x43, 0xfc, 0x56, 0x54, 0x25, 0x35, 0xdf, 0x63, 0x77, 0x6d, 0xeb, 0x52, 0xef, - 0x2c, 0xba, 0xbf, 0x5f, 0x1e, 0x59, 0x33, 0x30, 0x38, 0x41, 0x69, 0xff, 0x21, 0x1d, 0x71, 0x7f, - 0xa7, 0xe9, 0x7b, 0xc4, 0x8b, 0xe6, 0x7c, 0xaf, 0xce, 0x75, 0x32, 0xaf, 0x40, 0x4f, 0x44, 0x47, - 0x90, 0x7f, 0xf9, 0x45, 0xb9, 0xb4, 0xe9, 0xb8, 0x1d, 0xec, 0x97, 0x4f, 0xa5, 0x4b, 0xb0, 0x91, - 0x65, 0x65, 0xd0, 0x87, 0xa0, 0x2f, 0x8c, 0x9c, 0xa8, 0x15, 0x8a, 0x4f, 0x7d, 0x54, 0x8e, 0x7f, - 0x95, 0x41, 0x0f, 0xf6, 0xcb, 0xa3, 0xaa, 0x18, 0x07, 0x61, 0x51, 0x00, 0x3d, 0x05, 0xfd, 0x3b, - 0x24, 0x0c, 0x9d, 0x4d, 0x79, 0x7e, 0x8f, 0x8a, 0xb2, 0xfd, 0x2b, 0x1c, 0x8c, 0x25, 0x1e, 0x3d, - 0x06, 0xbd, 0x24, 0x08, 0xfc, 0x40, 0xec, 0x2a, 0xc3, 0x82, 0xb0, 0x77, 0x81, 0x02, 0x31, 0xc7, - 0xd9, 0xff, 0xd2, 0x82, 0x51, 0xd5, 0x56, 0x5e, 0xd7, 0x31, 0xdc, 0x9b, 0xde, 0x04, 0xa8, 0xc9, - 0x0f, 0x0c, 0xd9, 0x79, 0x37, 0xf8, 0xfc, 0xc5, 0x4c, 0xd1, 0x22, 0xd5, 0x8d, 0x31, 0x67, 0x05, - 0x0a, 0xb1, 0xc6, 0xcd, 0xfe, 0x35, 0x0b, 0x26, 0x12, 0x5f, 0xb4, 0xec, 0x86, 0x11, 0x7a, 0x2b, - 0xf5, 0x55, 0xd3, 0xdd, 0x7d, 0x15, 0x2d, 0xcd, 0xbe, 0x49, 0x2d, 0x3e, 0x09, 0xd1, 0xbe, 0xe8, - 0x1a, 0xf4, 0xba, 0x11, 0xd9, 0x91, 0x1f, 0xf3, 0x58, 0xdb, 0x8f, 0xe1, 0xad, 0x8a, 0x47, 0x64, - 0x89, 0x96, 0xc4, 0x9c, 0x81, 0xfd, 0x9b, 0x45, 0x28, 0xf1, 0x69, 0xbb, 0xe2, 0x34, 0x8f, 0x61, - 0x2c, 0x9e, 0x81, 0x92, 0xbb, 0xb3, 0xd3, 0x8a, 0x9c, 0x75, 0x71, 0x00, 0x0d, 0xf0, 0xcd, 0x60, - 0x49, 0x02, 0x71, 0x8c, 0x47, 0x4b, 0xd0, 0xc3, 0x9a, 0xc2, 0xbf, 0xf2, 0xc9, 0xec, 0xaf, 0x14, - 0x6d, 0x9f, 0x9e, 0x77, 0x22, 0x87, 0xcb, 0x7e, 0xea, 0xe4, 0xa3, 0x20, 0xcc, 0x58, 0x20, 0x07, - 0x60, 0xdd, 0xf5, 0x9c, 0x60, 0x8f, 0xc2, 0x26, 0x8b, 0x8c, 0xe1, 0x73, 0xed, 0x19, 0xce, 0x2a, - 0x7a, 0xce, 0x56, 0x7d, 0x58, 0x8c, 0xc0, 0x1a, 0xd3, 0xa9, 0x0f, 0x42, 0x49, 0x11, 0x1f, 0x46, - 0x84, 
0x9b, 0xfa, 0x08, 0x8c, 0x26, 0xea, 0xea, 0x54, 0x7c, 0x48, 0x97, 0x00, 0x7f, 0x85, 0x6d, - 0x19, 0xa2, 0xd5, 0x0b, 0xde, 0xae, 0xd8, 0x39, 0xef, 0xc1, 0x89, 0x46, 0xc6, 0xde, 0x2b, 0xc6, - 0xb5, 0xfb, 0xbd, 0xfa, 0xac, 0xf8, 0xec, 0x13, 0x59, 0x58, 0x9c, 0x59, 0x07, 0x95, 0x6a, 0xfc, - 0x26, 0x5d, 0x20, 0x4e, 0x43, 0xbf, 0x20, 0xdc, 0x10, 0x30, 0xac, 0xb0, 0x74, 0xbf, 0x3b, 0xa1, - 0x1a, 0x7f, 0x9d, 0xec, 0x55, 0x49, 0x83, 0xd4, 0x22, 0x3f, 0xf8, 0xa6, 0x36, 0xff, 0x1c, 0xef, - 0x7d, 0xbe, 0x5d, 0x0e, 0x0a, 0x06, 0xc5, 0xeb, 0x64, 0x8f, 0x0f, 0x85, 0xfe, 0x75, 0xc5, 0xb6, - 0x5f, 0xf7, 0x15, 0x0b, 0x86, 0xd5, 0xd7, 0x1d, 0xc3, 0xbe, 0x30, 0x6b, 0xee, 0x0b, 0xe7, 0xda, - 0x4e, 0xf0, 0x9c, 0x1d, 0xe1, 0x6b, 0x05, 0x38, 0xa3, 0x68, 0xe8, 0x6d, 0x86, 0xff, 0x11, 0xb3, - 0xea, 0x32, 0x94, 0x3c, 0xa5, 0xd7, 0xb3, 0x4c, 0x85, 0x5a, 0xac, 0xd5, 0x8b, 0x69, 0xa8, 0x50, - 0xea, 0xc5, 0xc7, 0xec, 0x90, 0xae, 0xf0, 0x16, 0xca, 0xed, 0x59, 0x28, 0xb6, 0xdc, 0xba, 0x38, - 0x60, 0xde, 0x2f, 0x7b, 0xfb, 0xe6, 0xd2, 0xfc, 0xc1, 0x7e, 0xf9, 0xd1, 0x3c, 0x63, 0x0b, 0x3d, - 0xd9, 0xc2, 0xe9, 0x9b, 0x4b, 0xf3, 0x98, 0x16, 0x46, 0x33, 0x30, 0x2a, 0x4f, 0xe8, 0x5b, 0x54, - 0x40, 0xf4, 0x3d, 0x71, 0x0e, 0x29, 0xad, 0x35, 0x36, 0xd1, 0x38, 0x49, 0x8f, 0xe6, 0x61, 0x6c, - 0xbb, 0xb5, 0x4e, 0x1a, 0x24, 0xe2, 0x1f, 0x7c, 0x9d, 0x70, 0x9d, 0x6e, 0x29, 0xbe, 0x4b, 0x5e, - 0x4f, 0xe0, 0x71, 0xaa, 0x84, 0xfd, 0xd7, 0xec, 0x3c, 0x10, 0xbd, 0x57, 0x09, 0x7c, 0x3a, 0xb1, - 0x28, 0xf7, 0x6f, 0xe6, 0x74, 0xee, 0x66, 0x56, 0x5c, 0x27, 0x7b, 0x6b, 0x3e, 0xbd, 0x4b, 0x64, - 0xcf, 0x0a, 0x63, 0xce, 0xf7, 0xb4, 0x9d, 0xf3, 0xbf, 0x50, 0x80, 0x93, 0xaa, 0x07, 0x0c, 0xb1, - 0xf5, 0x5b, 0xbd, 0x0f, 0xae, 0xc0, 0x60, 0x9d, 0x6c, 0x38, 0xad, 0x46, 0xa4, 0x0c, 0x0c, 0xbd, - 0xdc, 0xc8, 0x34, 0x1f, 0x83, 0xb1, 0x4e, 0x73, 0x88, 0x6e, 0xfb, 0xf9, 0x61, 0x76, 0x10, 0x47, - 0x0e, 0x9d, 0xe3, 0x6a, 0xd5, 0x58, 0xb9, 0xab, 0xe6, 0x31, 0xe8, 0x75, 0x77, 0xa8, 0x60, 0x56, - 0x30, 0xe5, 0xad, 0x25, 0x0a, 0xc4, 0x1c, 0x87, 0x9e, 0x80, 0xfe, 0x9a, 0xbf, 0xb3, 0xe3, 0x78, - 0x75, 0x76, 0xe4, 0x95, 0x66, 0x07, 0xa9, 0xec, 0x36, 0xc7, 0x41, 0x58, 0xe2, 0xd0, 0x59, 0xe8, - 0x71, 0x82, 0x4d, 0xae, 0x75, 0x29, 0xcd, 0x0e, 0xd0, 0x9a, 0x66, 0x82, 0xcd, 0x10, 0x33, 0x28, - 0xbd, 0x34, 0xde, 0xf1, 0x83, 0x6d, 0xd7, 0xdb, 0x9c, 0x77, 0x03, 0xb1, 0x24, 0xd4, 0x59, 0x78, - 0x5b, 0x61, 0xb0, 0x46, 0x85, 0x16, 0xa1, 0xb7, 0xe9, 0x07, 0x51, 0x38, 0xd9, 0xc7, 0xba, 0xfb, - 0xd1, 0x9c, 0x8d, 0x88, 0x7f, 0x6d, 0xc5, 0x0f, 0xa2, 0xf8, 0x03, 0xe8, 0xbf, 0x10, 0xf3, 0xe2, - 0x68, 0x19, 0xfa, 0x89, 0xb7, 0xbb, 0x18, 0xf8, 0x3b, 0x93, 0x13, 0xf9, 0x9c, 0x16, 0x38, 0x09, - 0x9f, 0x66, 0xb1, 0x8c, 0x2a, 0xc0, 0x58, 0xb2, 0x40, 0x1f, 0x82, 0x22, 0xf1, 0x76, 0x27, 0xfb, - 0x19, 0xa7, 0xa9, 0x1c, 0x4e, 0xb7, 0x9c, 0x20, 0xde, 0xf3, 0x17, 0xbc, 0x5d, 0x4c, 0xcb, 0xa0, - 0x8f, 0x43, 0x49, 0x6e, 0x18, 0xa1, 0x50, 0x67, 0x66, 0x4e, 0x58, 0xb9, 0xcd, 0x60, 0xf2, 0x76, - 0xcb, 0x0d, 0xc8, 0x0e, 0xf1, 0xa2, 0x30, 0xde, 0x21, 0x25, 0x36, 0xc4, 0x31, 0x37, 0x54, 0x83, - 0xa1, 0x80, 0x84, 0xee, 0x3d, 0x52, 0xf1, 0x1b, 0x6e, 0x6d, 0x6f, 0xf2, 0x34, 0x6b, 0xde, 0x53, - 0x6d, 0xbb, 0x0c, 0x6b, 0x05, 0x62, 0x75, 0xbb, 0x0e, 0xc5, 0x06, 0x53, 0xf4, 0x06, 0x0c, 0x07, - 0x24, 0x8c, 0x9c, 0x20, 0x12, 0xb5, 0x4c, 0x2a, 0xf3, 0xd8, 0x30, 0xd6, 0x11, 0xfc, 0x3a, 0x11, - 0x57, 0x13, 0x63, 0xb0, 0xc9, 0x01, 0x7d, 0x5c, 0xea, 0xfe, 0x57, 0xfc, 0x96, 0x17, 0x85, 0x93, - 0x25, 0xd6, 0xee, 0x4c, 0xab, 0xec, 0xad, 0x98, 0x2e, 0x69, 0x1c, 0xe0, 0x85, 0xb1, 0xc1, 0x0a, - 0x7d, 0x12, 0x86, 0xf9, 0x7f, 
0x6e, 0xdb, 0x0c, 0x27, 0x4f, 0x32, 0xde, 0x17, 0xf2, 0x79, 0x73, - 0xc2, 0xd9, 0x93, 0x82, 0xf9, 0xb0, 0x0e, 0x0d, 0xb1, 0xc9, 0x0d, 0x61, 0x18, 0x6e, 0xb8, 0xbb, - 0xc4, 0x23, 0x61, 0x58, 0x09, 0xfc, 0x75, 0x22, 0x54, 0xb5, 0x67, 0xb2, 0x6d, 0xa1, 0xfe, 0x3a, - 0x99, 0x1d, 0xa7, 0x3c, 0x97, 0xf5, 0x32, 0xd8, 0x64, 0x81, 0x6e, 0xc2, 0x08, 0xbd, 0x1b, 0xbb, - 0x31, 0xd3, 0xc1, 0x4e, 0x4c, 0xd9, 0x7d, 0x10, 0x1b, 0x85, 0x70, 0x82, 0x09, 0xba, 0x01, 0x43, - 0xac, 0xcf, 0x5b, 0x4d, 0xce, 0xf4, 0x54, 0x27, 0xa6, 0xcc, 0x94, 0x5e, 0xd5, 0x8a, 0x60, 0x83, - 0x01, 0x7a, 0x1d, 0x4a, 0x0d, 0x77, 0x83, 0xd4, 0xf6, 0x6a, 0x0d, 0x32, 0x39, 0xc4, 0xb8, 0x65, - 0x6e, 0x86, 0xcb, 0x92, 0x88, 0xcb, 0xe7, 0xea, 0x2f, 0x8e, 0x8b, 0xa3, 0x5b, 0x70, 0x2a, 0x22, - 0xc1, 0x8e, 0xeb, 0x39, 0x74, 0x13, 0x13, 0x57, 0x42, 0x66, 0xa2, 0x1e, 0x66, 0xb3, 0xeb, 0xbc, - 0x18, 0x8d, 0x53, 0x6b, 0x99, 0x54, 0x38, 0xa7, 0x34, 0xba, 0x0b, 0x93, 0x19, 0x18, 0x3e, 0x6f, - 0x4f, 0x30, 0xce, 0x1f, 0x16, 0x9c, 0x27, 0xd7, 0x72, 0xe8, 0x0e, 0xda, 0xe0, 0x70, 0x2e, 0x77, - 0x74, 0x03, 0x46, 0xd9, 0xce, 0x59, 0x69, 0x35, 0x1a, 0xa2, 0xc2, 0x11, 0x56, 0xe1, 0x13, 0x52, - 0x8e, 0x58, 0x32, 0xd1, 0x07, 0xfb, 0x65, 0x88, 0xff, 0xe1, 0x64, 0x69, 0xb4, 0xce, 0xac, 0xa1, - 0xad, 0xc0, 0x8d, 0xf6, 0xe8, 0xaa, 0x22, 0x77, 0xa3, 0xc9, 0xd1, 0xb6, 0x9a, 0x21, 0x9d, 0x54, - 0x99, 0x4c, 0x75, 0x20, 0x4e, 0x32, 0xa4, 0x47, 0x41, 0x18, 0xd5, 0x5d, 0x6f, 0x72, 0x8c, 0xdf, - 0xa7, 0xe4, 0x4e, 0x5a, 0xa5, 0x40, 0xcc, 0x71, 0xcc, 0x12, 0x4a, 0x7f, 0xdc, 0xa0, 0x27, 0xee, - 0x38, 0x23, 0x8c, 0x2d, 0xa1, 0x12, 0x81, 0x63, 0x1a, 0x2a, 0x04, 0x47, 0xd1, 0xde, 0x24, 0x62, - 0xa4, 0x6a, 0x43, 0x5c, 0x5b, 0xfb, 0x38, 0xa6, 0x70, 0x7b, 0x1d, 0x46, 0xd4, 0x36, 0xc1, 0xfa, - 0x04, 0x95, 0xa1, 0x97, 0x89, 0x7d, 0x42, 0x8f, 0x59, 0xa2, 0x4d, 0x60, 0x22, 0x21, 0xe6, 0x70, - 0xd6, 0x04, 0xf7, 0x1e, 0x99, 0xdd, 0x8b, 0x08, 0xd7, 0x45, 0x14, 0xb5, 0x26, 0x48, 0x04, 0x8e, - 0x69, 0xec, 0xff, 0xc9, 0xc5, 0xe7, 0xf8, 0x94, 0xe8, 0xe2, 0x5c, 0x7c, 0x16, 0x06, 0xb6, 0xfc, - 0x30, 0xa2, 0xd4, 0xac, 0x8e, 0xde, 0x58, 0x60, 0xbe, 0x26, 0xe0, 0x58, 0x51, 0xa0, 0x57, 0x61, - 0xb8, 0xa6, 0x57, 0x20, 0x0e, 0x75, 0xb5, 0x8d, 0x18, 0xb5, 0x63, 0x93, 0x16, 0xbd, 0x0c, 0x03, - 0xcc, 0xbb, 0xa7, 0xe6, 0x37, 0x84, 0xb4, 0x29, 0x25, 0x93, 0x81, 0x8a, 0x80, 0x1f, 0x68, 0xbf, - 0xb1, 0xa2, 0x46, 0x17, 0xa1, 0x8f, 0x36, 0x61, 0xa9, 0x22, 0x8e, 0x53, 0xa5, 0x92, 0xbb, 0xc6, - 0xa0, 0x58, 0x60, 0xed, 0x5f, 0xb3, 0x98, 0x2c, 0x95, 0xde, 0xf3, 0xd1, 0x35, 0x76, 0x68, 0xb0, - 0x13, 0x44, 0x53, 0x89, 0x3d, 0xae, 0x9d, 0x04, 0x0a, 0x77, 0x90, 0xf8, 0x8f, 0x8d, 0x92, 0xe8, - 0xcd, 0xe4, 0xc9, 0xc0, 0x05, 0x8a, 0x17, 0x65, 0x17, 0x24, 0x4f, 0x87, 0x47, 0xe2, 0x23, 0x8e, - 0xb6, 0xa7, 0xdd, 0x11, 0x61, 0xff, 0x5f, 0x05, 0x6d, 0x96, 0x54, 0x23, 0x27, 0x22, 0xa8, 0x02, - 0xfd, 0x77, 0x1c, 0x37, 0x72, 0xbd, 0x4d, 0x21, 0xf7, 0xb5, 0x3f, 0xe8, 0x58, 0xa1, 0xdb, 0xbc, - 0x00, 0x97, 0x5e, 0xc4, 0x1f, 0x2c, 0xd9, 0x50, 0x8e, 0x41, 0xcb, 0xf3, 0x28, 0xc7, 0x42, 0xb7, - 0x1c, 0x31, 0x2f, 0xc0, 0x39, 0x8a, 0x3f, 0x58, 0xb2, 0x41, 0x6f, 0x01, 0xc8, 0x1d, 0x82, 0xd4, - 0x85, 0x57, 0xd0, 0xb3, 0x9d, 0x99, 0xae, 0xa9, 0x32, 0xb3, 0x23, 0x54, 0x36, 0x8a, 0xff, 0x63, - 0x8d, 0x9f, 0x1d, 0x69, 0x63, 0xaa, 0x37, 0x06, 0x7d, 0x82, 0x2e, 0x51, 0x27, 0x88, 0x48, 0x7d, - 0x26, 0x12, 0x9d, 0xf3, 0x74, 0x77, 0x97, 0xc3, 0x35, 0x77, 0x87, 0xe8, 0xcb, 0x59, 0x30, 0xc1, - 0x31, 0x3f, 0xfb, 0x97, 0x8a, 0x30, 0x99, 0xd7, 0x5c, 0xba, 0x68, 0xc8, 0x5d, 0x37, 0x9a, 0xa3, - 0x62, 0xad, 0x65, 0x2e, 0x9a, 0x05, 0x01, 0xc7, 0x8a, 
0x82, 0xce, 0xde, 0xd0, 0xdd, 0x94, 0x77, - 0xfb, 0xde, 0x78, 0xf6, 0x56, 0x19, 0x14, 0x0b, 0x2c, 0xa5, 0x0b, 0x88, 0x13, 0x0a, 0xb7, 0x33, - 0x6d, 0x96, 0x63, 0x06, 0xc5, 0x02, 0xab, 0x6b, 0x19, 0x7b, 0x3a, 0x68, 0x19, 0x8d, 0x2e, 0xea, - 0x3d, 0xda, 0x2e, 0x42, 0x9f, 0x02, 0xd8, 0x70, 0x3d, 0x37, 0xdc, 0x62, 0xdc, 0xfb, 0x0e, 0xcd, - 0x5d, 0x09, 0xc5, 0x8b, 0x8a, 0x0b, 0xd6, 0x38, 0xa2, 0x97, 0x60, 0x50, 0x6d, 0x20, 0x4b, 0xf3, - 0xcc, 0x06, 0xaf, 0xf9, 0x34, 0xc5, 0xbb, 0xe9, 0x3c, 0xd6, 0xe9, 0xec, 0xcf, 0x24, 0xe7, 0x8b, - 0x58, 0x01, 0x5a, 0xff, 0x5a, 0xdd, 0xf6, 0x6f, 0xa1, 0x7d, 0xff, 0xda, 0xdf, 0xe8, 0x83, 0x51, - 0xa3, 0xb2, 0x56, 0xd8, 0xc5, 0x9e, 0x7b, 0x95, 0x1e, 0x40, 0x4e, 0x44, 0xc4, 0xfa, 0xb3, 0x3b, - 0x2f, 0x15, 0xfd, 0x90, 0xa2, 0x2b, 0x80, 0x97, 0x47, 0x9f, 0x82, 0x52, 0xc3, 0x09, 0x99, 0xc6, - 0x92, 0x88, 0x75, 0xd7, 0x0d, 0xb3, 0xf8, 0x42, 0xe8, 0x84, 0x91, 0x76, 0xea, 0x73, 0xde, 0x31, - 0x4b, 0x7a, 0x52, 0x52, 0xf9, 0x4a, 0xfa, 0x35, 0xaa, 0x46, 0x50, 0x21, 0x6c, 0x0f, 0x73, 0x1c, - 0x7a, 0x99, 0x6d, 0xad, 0x74, 0x56, 0xcc, 0x51, 0x69, 0x94, 0x4d, 0xb3, 0x5e, 0x43, 0xc8, 0x56, - 0x38, 0x6c, 0x50, 0xc6, 0x77, 0xb2, 0xbe, 0x36, 0x77, 0xb2, 0xa7, 0xa0, 0x9f, 0xfd, 0x50, 0x33, - 0x40, 0x8d, 0xc6, 0x12, 0x07, 0x63, 0x89, 0x4f, 0x4e, 0x98, 0x81, 0xee, 0x26, 0x0c, 0xbd, 0xf5, - 0x89, 0x49, 0xcd, 0xfc, 0x1f, 0x06, 0xf8, 0x2e, 0x27, 0xa6, 0x3c, 0x96, 0x38, 0xf4, 0x33, 0x16, - 0x20, 0xa7, 0x41, 0x6f, 0xcb, 0x14, 0xac, 0x2e, 0x37, 0xc0, 0x44, 0xed, 0x57, 0x3b, 0x76, 0x7b, - 0x2b, 0x9c, 0x9e, 0x49, 0x95, 0xe6, 0x9a, 0xd2, 0x57, 0x44, 0x13, 0x51, 0x9a, 0x40, 0x3f, 0x8c, - 0x96, 0xdd, 0x30, 0xfa, 0xfc, 0x1f, 0x25, 0x0e, 0xa7, 0x8c, 0x26, 0xa1, 0x9b, 0xfa, 0xe5, 0x6b, - 0xf0, 0x90, 0x97, 0xaf, 0xe1, 0xbc, 0x8b, 0xd7, 0x54, 0x0b, 0x4e, 0xe7, 0x7c, 0x41, 0x86, 0xfe, - 0x75, 0x5e, 0xd7, 0xbf, 0x76, 0xd0, 0xda, 0x4d, 0xcb, 0x3a, 0xa6, 0xdf, 0x68, 0x39, 0x5e, 0xe4, - 0x46, 0x7b, 0xba, 0xbe, 0xf6, 0x69, 0x18, 0x99, 0x77, 0xc8, 0x8e, 0xef, 0x2d, 0x78, 0xf5, 0xa6, - 0xef, 0x7a, 0x11, 0x9a, 0x84, 0x1e, 0x26, 0x7c, 0xf0, 0xad, 0xb7, 0x87, 0xf6, 0x1e, 0x66, 0x10, - 0x7b, 0x13, 0x4e, 0xce, 0xfb, 0x77, 0xbc, 0x3b, 0x4e, 0x50, 0x9f, 0xa9, 0x2c, 0x69, 0xfa, 0xa4, - 0x55, 0xa9, 0xcf, 0xb0, 0xf2, 0x6f, 0x8b, 0x5a, 0x49, 0x7e, 0x1d, 0x5a, 0x74, 0x1b, 0x24, 0x47, - 0xeb, 0xf7, 0xff, 0x16, 0x8c, 0x9a, 0x62, 0x7a, 0x65, 0x77, 0xb6, 0x72, 0xed, 0xce, 0x6f, 0xc0, - 0xc0, 0x86, 0x4b, 0x1a, 0x75, 0x4c, 0x36, 0x44, 0xef, 0x3c, 0x99, 0xef, 0x99, 0xb6, 0x48, 0x29, - 0xa5, 0x96, 0x97, 0x6b, 0x43, 0x16, 0x45, 0x61, 0xac, 0xd8, 0xa0, 0x6d, 0x18, 0x93, 0x7d, 0x28, - 0xb1, 0x62, 0x3f, 0x78, 0xaa, 0xdd, 0xc0, 0x9b, 0xcc, 0x4f, 0xdc, 0xdf, 0x2f, 0x8f, 0xe1, 0x04, - 0x1b, 0x9c, 0x62, 0x8c, 0xce, 0x42, 0xcf, 0x0e, 0x3d, 0xf9, 0x7a, 0x58, 0xf7, 0x33, 0xf5, 0x07, - 0xd3, 0xe4, 0x30, 0xa8, 0xfd, 0x63, 0x16, 0x9c, 0x4e, 0xf5, 0x8c, 0xd0, 0x68, 0x1d, 0xf1, 0x28, - 0x24, 0x35, 0x4c, 0x85, 0xce, 0x1a, 0x26, 0xfb, 0x6f, 0x59, 0x70, 0x62, 0x61, 0xa7, 0x19, 0xed, - 0xcd, 0xbb, 0xa6, 0x91, 0xf8, 0x83, 0xd0, 0xb7, 0x43, 0xea, 0x6e, 0x6b, 0x47, 0x8c, 0x5c, 0x59, - 0x9e, 0x0e, 0x2b, 0x0c, 0x7a, 0xb0, 0x5f, 0x1e, 0xae, 0x46, 0x7e, 0xe0, 0x6c, 0x12, 0x0e, 0xc0, - 0x82, 0x9c, 0x9d, 0xb1, 0xee, 0x3d, 0xb2, 0xec, 0xee, 0xb8, 0xd1, 0x83, 0xcd, 0x76, 0x61, 0xdf, - 0x95, 0x4c, 0x70, 0xcc, 0xcf, 0xfe, 0xba, 0x05, 0xa3, 0x72, 0xde, 0xcf, 0xd4, 0xeb, 0x01, 0x09, - 0x43, 0x34, 0x05, 0x05, 0xb7, 0x29, 0x5a, 0x09, 0xa2, 0x95, 0x85, 0xa5, 0x0a, 0x2e, 0xb8, 0x4d, - 0x29, 0xce, 0xb3, 0x03, 0xa8, 0x68, 0x9a, 0xba, 0xaf, 0x09, 0x38, 0x56, 0x14, 
0xe8, 0x12, 0x0c, - 0x78, 0x7e, 0x9d, 0x4b, 0xc4, 0x5c, 0x94, 0x60, 0x13, 0x6c, 0x55, 0xc0, 0xb0, 0xc2, 0xa2, 0x0a, - 0x94, 0xb8, 0x23, 0x64, 0x3c, 0x69, 0xbb, 0x72, 0xa7, 0x64, 0x5f, 0xb6, 0x26, 0x4b, 0xe2, 0x98, - 0x89, 0xfd, 0x1b, 0x16, 0x0c, 0xc9, 0x2f, 0xeb, 0xf2, 0xae, 0x42, 0x97, 0x56, 0x7c, 0x4f, 0x89, - 0x97, 0x16, 0xbd, 0x6b, 0x30, 0x8c, 0x71, 0xc5, 0x28, 0x1e, 0xea, 0x8a, 0x71, 0x05, 0x06, 0x9d, - 0x66, 0xb3, 0x62, 0xde, 0x4f, 0xd8, 0x54, 0x9a, 0x89, 0xc1, 0x58, 0xa7, 0xb1, 0x7f, 0xb4, 0x00, - 0x23, 0xf2, 0x0b, 0xaa, 0xad, 0xf5, 0x90, 0x44, 0x68, 0x0d, 0x4a, 0x0e, 0x1f, 0x25, 0x22, 0x27, - 0xf9, 0x63, 0xd9, 0x7a, 0x33, 0x63, 0x48, 0x63, 0x41, 0x6b, 0x46, 0x96, 0xc6, 0x31, 0x23, 0xd4, - 0x80, 0x71, 0xcf, 0x8f, 0xd8, 0xa1, 0xab, 0xf0, 0xed, 0x4c, 0x99, 0x49, 0xee, 0x67, 0x04, 0xf7, - 0xf1, 0xd5, 0x24, 0x17, 0x9c, 0x66, 0x8c, 0x16, 0xa4, 0x2e, 0xb2, 0x98, 0xaf, 0x44, 0xd2, 0x07, - 0x2e, 0x5b, 0x15, 0x69, 0xff, 0xaa, 0x05, 0x25, 0x49, 0x76, 0x1c, 0x56, 0xeb, 0x15, 0xe8, 0x0f, - 0xd9, 0x20, 0xc8, 0xae, 0xb1, 0xdb, 0x35, 0x9c, 0x8f, 0x57, 0x2c, 0x4b, 0xf0, 0xff, 0x21, 0x96, - 0x3c, 0x98, 0x29, 0x4a, 0x35, 0xff, 0x5d, 0x62, 0x8a, 0x52, 0xed, 0xc9, 0x39, 0x94, 0xfe, 0x94, - 0xb5, 0x59, 0xd3, 0xed, 0x52, 0x91, 0xb7, 0x19, 0x90, 0x0d, 0xf7, 0x6e, 0x52, 0xe4, 0xad, 0x30, - 0x28, 0x16, 0x58, 0xf4, 0x16, 0x0c, 0xd5, 0xa4, 0x0d, 0x22, 0x5e, 0xe1, 0x17, 0xdb, 0xda, 0xc3, - 0x94, 0xe9, 0x94, 0xeb, 0xd0, 0xe6, 0xb4, 0xf2, 0xd8, 0xe0, 0x66, 0x3a, 0xfa, 0x14, 0x3b, 0x39, - 0xfa, 0xc4, 0x7c, 0xf3, 0xdd, 0x5e, 0x7e, 0xdc, 0x82, 0x3e, 0xae, 0x7b, 0xee, 0x4e, 0xf5, 0xaf, - 0x59, 0x92, 0xe3, 0xbe, 0xbb, 0x45, 0x81, 0x42, 0xd2, 0x40, 0x2b, 0x50, 0x62, 0x3f, 0x98, 0xee, - 0xbc, 0x98, 0xff, 0x0e, 0x87, 0xd7, 0xaa, 0x37, 0xf0, 0x96, 0x2c, 0x86, 0x63, 0x0e, 0xf6, 0x8f, - 0x14, 0xe9, 0xee, 0x16, 0x93, 0x1a, 0x87, 0xbe, 0xf5, 0xf0, 0x0e, 0xfd, 0xc2, 0xc3, 0x3a, 0xf4, - 0x37, 0x61, 0xb4, 0xa6, 0xd9, 0x9d, 0xe3, 0x91, 0xbc, 0xd4, 0x76, 0x92, 0x68, 0x26, 0x6a, 0xae, - 0x9d, 0x9b, 0x33, 0x99, 0xe0, 0x24, 0x57, 0xf4, 0x09, 0x18, 0xe2, 0xe3, 0x2c, 0x6a, 0xe1, 0xbe, - 0x52, 0x4f, 0xe4, 0xcf, 0x17, 0xbd, 0x0a, 0xae, 0xcd, 0xd5, 0x8a, 0x63, 0x83, 0x99, 0xfd, 0x97, - 0x16, 0xa0, 0x85, 0xe6, 0x16, 0xd9, 0x21, 0x81, 0xd3, 0x88, 0xcd, 0x47, 0x3f, 0x68, 0xc1, 0x24, - 0x49, 0x81, 0xe7, 0xfc, 0x9d, 0x1d, 0x71, 0x59, 0xcc, 0xd1, 0x67, 0x2c, 0xe4, 0x94, 0x51, 0x0f, - 0x95, 0x26, 0xf3, 0x28, 0x70, 0x6e, 0x7d, 0x68, 0x05, 0x26, 0xf8, 0x29, 0xa9, 0x10, 0x9a, 0xdf, - 0xd5, 0x23, 0x82, 0xf1, 0xc4, 0x5a, 0x9a, 0x04, 0x67, 0x95, 0xb3, 0x7f, 0x75, 0x18, 0x72, 0x5b, - 0xf1, 0x9e, 0xdd, 0xec, 0x3d, 0xbb, 0xd9, 0x7b, 0x76, 0xb3, 0xf7, 0xec, 0x66, 0xef, 0xd9, 0xcd, - 0xde, 0xb3, 0x9b, 0xbd, 0x4b, 0xed, 0x66, 0xff, 0xb7, 0x05, 0x27, 0xd5, 0xf1, 0x65, 0x5c, 0xd8, - 0x3f, 0x0b, 0x13, 0x7c, 0xb9, 0x19, 0x3e, 0xc6, 0xe2, 0xb8, 0xbe, 0x92, 0x39, 0x73, 0x13, 0xbe, - 0xf0, 0x46, 0x41, 0xfe, 0xa8, 0x28, 0x03, 0x81, 0xb3, 0xaa, 0xb1, 0x7f, 0x69, 0x00, 0x7a, 0x17, - 0x76, 0x89, 0x17, 0x1d, 0xc3, 0xd5, 0xa6, 0x06, 0x23, 0xae, 0xb7, 0xeb, 0x37, 0x76, 0x49, 0x9d, - 0xe3, 0x0f, 0x73, 0x03, 0x3f, 0x25, 0x58, 0x8f, 0x2c, 0x19, 0x2c, 0x70, 0x82, 0xe5, 0xc3, 0xb0, - 0x3e, 0x5c, 0x85, 0x3e, 0x7e, 0xf8, 0x08, 0xd3, 0x43, 0xe6, 0x9e, 0xcd, 0x3a, 0x51, 0x1c, 0xa9, - 0xb1, 0x65, 0x84, 0x1f, 0x6e, 0xa2, 0x38, 0xfa, 0x0c, 0x8c, 0x6c, 0xb8, 0x41, 0x18, 0xad, 0xb9, - 0x3b, 0xf4, 0x68, 0xd8, 0x69, 0x3e, 0x80, 0xb5, 0x41, 0xf5, 0xc3, 0xa2, 0xc1, 0x09, 0x27, 0x38, - 0xa3, 0x4d, 0x18, 0x6e, 0x38, 0x7a, 0x55, 0xfd, 0x87, 0xae, 0x4a, 0x9d, 0x0e, 0xcb, 0x3a, 0x23, - 0x6c, 
0xf2, 0xa5, 0xcb, 0xa9, 0xc6, 0x14, 0xe6, 0x03, 0x4c, 0x9d, 0xa1, 0x96, 0x13, 0xd7, 0x94, - 0x73, 0x1c, 0x15, 0xd0, 0x98, 0x23, 0x7b, 0xc9, 0x14, 0xd0, 0x34, 0x77, 0xf5, 0x4f, 0x43, 0x89, - 0xd0, 0x2e, 0xa4, 0x8c, 0xc5, 0x01, 0x73, 0xb9, 0xbb, 0xb6, 0xae, 0xb8, 0xb5, 0xc0, 0x37, 0xed, - 0x3c, 0x0b, 0x92, 0x13, 0x8e, 0x99, 0xa2, 0x39, 0xe8, 0x0b, 0x49, 0xe0, 0x2a, 0x5d, 0x72, 0x9b, - 0x61, 0x64, 0x64, 0xfc, 0xd5, 0x1a, 0xff, 0x8d, 0x45, 0x51, 0x3a, 0xbd, 0x1c, 0xa6, 0x8a, 0x65, - 0x87, 0x81, 0x36, 0xbd, 0x66, 0x18, 0x14, 0x0b, 0x2c, 0x7a, 0x1d, 0xfa, 0x03, 0xd2, 0x60, 0x86, - 0xc4, 0xe1, 0xee, 0x27, 0x39, 0xb7, 0x4b, 0xf2, 0x72, 0x58, 0x32, 0x40, 0xd7, 0x01, 0x05, 0x84, - 0x0a, 0x78, 0xae, 0xb7, 0xa9, 0xdc, 0xbb, 0xc5, 0x46, 0xab, 0x04, 0x69, 0x1c, 0x53, 0xc8, 0x07, - 0x8b, 0x38, 0xa3, 0x18, 0xba, 0x0a, 0xe3, 0x0a, 0xba, 0xe4, 0x85, 0x91, 0x43, 0x37, 0xb8, 0x51, - 0xc6, 0x4b, 0xe9, 0x57, 0x70, 0x92, 0x00, 0xa7, 0xcb, 0xd8, 0x3f, 0x67, 0x01, 0xef, 0xe7, 0x63, - 0xd0, 0x2a, 0xbc, 0x66, 0x6a, 0x15, 0xce, 0xe4, 0x8e, 0x5c, 0x8e, 0x46, 0xe1, 0xe7, 0x2c, 0x18, - 0xd4, 0x46, 0x36, 0x9e, 0xb3, 0x56, 0x9b, 0x39, 0xdb, 0x82, 0x31, 0x3a, 0xd3, 0x6f, 0xac, 0x87, - 0x24, 0xd8, 0x25, 0x75, 0x36, 0x31, 0x0b, 0x0f, 0x36, 0x31, 0x95, 0x2b, 0xe9, 0x72, 0x82, 0x21, - 0x4e, 0x55, 0x61, 0x7f, 0x5a, 0x36, 0x55, 0x79, 0xde, 0xd6, 0xd4, 0x98, 0x27, 0x3c, 0x6f, 0xd5, - 0xa8, 0xe2, 0x98, 0x86, 0x2e, 0xb5, 0x2d, 0x3f, 0x8c, 0x92, 0x9e, 0xb7, 0xd7, 0xfc, 0x30, 0xc2, - 0x0c, 0x63, 0xbf, 0x00, 0xb0, 0x70, 0x97, 0xd4, 0xf8, 0x8c, 0xd5, 0x2f, 0x3d, 0x56, 0xfe, 0xa5, - 0xc7, 0xfe, 0x3d, 0x0b, 0x46, 0x16, 0xe7, 0x8c, 0x93, 0x6b, 0x1a, 0x80, 0xdf, 0xd4, 0x6e, 0xdf, - 0x5e, 0x95, 0xee, 0x1f, 0xdc, 0x02, 0xae, 0xa0, 0x58, 0xa3, 0x40, 0x67, 0xa0, 0xd8, 0x68, 0x79, - 0x42, 0xed, 0xd9, 0x4f, 0x8f, 0xc7, 0xe5, 0x96, 0x87, 0x29, 0x4c, 0x7b, 0xac, 0x54, 0xec, 0xfa, - 0xb1, 0x52, 0xc7, 0x20, 0x25, 0xa8, 0x0c, 0xbd, 0x77, 0xee, 0xb8, 0x75, 0xfe, 0x14, 0x5c, 0xb8, - 0xa6, 0xdc, 0xbe, 0xbd, 0x34, 0x1f, 0x62, 0x0e, 0xb7, 0xbf, 0x58, 0x84, 0xa9, 0xc5, 0x06, 0xb9, - 0xfb, 0x0e, 0x9f, 0xc3, 0x77, 0xfb, 0xd4, 0xea, 0x70, 0x0a, 0xa4, 0xc3, 0x3e, 0xa7, 0xeb, 0xdc, - 0x1f, 0x1b, 0xd0, 0xcf, 0x1d, 0x4f, 0xe5, 0xe3, 0xf8, 0x4c, 0x73, 0x5f, 0x7e, 0x87, 0x4c, 0x73, - 0x07, 0x56, 0x61, 0xee, 0x53, 0x07, 0xa6, 0x80, 0x62, 0xc9, 0x7c, 0xea, 0x15, 0x18, 0xd2, 0x29, - 0x0f, 0xf5, 0xb0, 0xf5, 0xbb, 0x8b, 0x30, 0x46, 0x5b, 0xf0, 0x50, 0x07, 0xe2, 0x66, 0x7a, 0x20, - 0x8e, 0xfa, 0x71, 0x63, 0xe7, 0xd1, 0x78, 0x2b, 0x39, 0x1a, 0x57, 0xf2, 0x46, 0xe3, 0xb8, 0xc7, - 0xe0, 0x7b, 0x2c, 0x98, 0x58, 0x6c, 0xf8, 0xb5, 0xed, 0xc4, 0x03, 0xc4, 0x97, 0x60, 0x90, 0x6e, - 0xc7, 0xa1, 0x11, 0x8b, 0xc3, 0x88, 0xce, 0x22, 0x50, 0x58, 0xa7, 0xd3, 0x8a, 0xdd, 0xbc, 0xb9, - 0x34, 0x9f, 0x15, 0xd4, 0x45, 0xa0, 0xb0, 0x4e, 0x67, 0xff, 0x8e, 0x05, 0xe7, 0xae, 0xce, 0x2d, - 0xc4, 0x53, 0x31, 0x15, 0x57, 0xe6, 0x22, 0xf4, 0x35, 0xeb, 0x5a, 0x53, 0x62, 0xb5, 0xf0, 0x3c, - 0x6b, 0x85, 0xc0, 0xbe, 0x5b, 0x62, 0x26, 0xdd, 0x04, 0xb8, 0x8a, 0x2b, 0x73, 0x62, 0xdf, 0x95, - 0x56, 0x20, 0x2b, 0xd7, 0x0a, 0xf4, 0x04, 0xf4, 0xd3, 0x73, 0xc1, 0xad, 0xc9, 0x76, 0x73, 0x83, - 0x3e, 0x07, 0x61, 0x89, 0xb3, 0x7f, 0xd6, 0x82, 0x89, 0xab, 0x6e, 0x44, 0x0f, 0xed, 0x64, 0xe0, - 0x14, 0x7a, 0x6a, 0x87, 0x6e, 0xe4, 0x07, 0x7b, 0xc9, 0xc0, 0x29, 0x58, 0x61, 0xb0, 0x46, 0xc5, - 0x3f, 0x68, 0xd7, 0x65, 0x2f, 0x29, 0x0a, 0xa6, 0xdd, 0x0d, 0x0b, 0x38, 0x56, 0x14, 0xb4, 0xbf, - 0xea, 0x6e, 0xc0, 0x54, 0x96, 0x7b, 0x62, 0xe3, 0x56, 0xfd, 0x35, 0x2f, 0x11, 0x38, 0xa6, 0xb1, - 0xff, 0xdc, 0x82, 0xf2, 0xd5, 
0x46, 0x2b, 0x8c, 0x48, 0xb0, 0x11, 0xe6, 0x6c, 0xba, 0x2f, 0x40, - 0x89, 0x48, 0x03, 0x81, 0x7c, 0xf2, 0x29, 0x05, 0x51, 0x65, 0x39, 0xe0, 0xf1, 0x5b, 0x14, 0x5d, - 0x17, 0xaf, 0xa4, 0x0f, 0xf7, 0xcc, 0x75, 0x11, 0x10, 0xd1, 0xeb, 0xd2, 0x03, 0xda, 0xb0, 0xc8, - 0x18, 0x0b, 0x29, 0x2c, 0xce, 0x28, 0x61, 0xff, 0x98, 0x05, 0x27, 0xd5, 0x07, 0xbf, 0xeb, 0x3e, - 0xd3, 0xfe, 0x6a, 0x01, 0x86, 0xaf, 0xad, 0xad, 0x55, 0xae, 0x92, 0x48, 0x9b, 0x95, 0xed, 0xcd, - 0xfe, 0x58, 0xb3, 0x5e, 0xb6, 0xbb, 0x23, 0xb6, 0x22, 0xb7, 0x31, 0xcd, 0xe3, 0xa2, 0x4d, 0x2f, - 0x79, 0xd1, 0x8d, 0xa0, 0x1a, 0x05, 0xae, 0xb7, 0x99, 0x39, 0xd3, 0xa5, 0xcc, 0x52, 0xcc, 0x93, - 0x59, 0xd0, 0x0b, 0xd0, 0xc7, 0x02, 0xb3, 0xc9, 0x41, 0x78, 0x44, 0x5d, 0xb1, 0x18, 0xf4, 0x60, - 0xbf, 0x5c, 0xba, 0x89, 0x97, 0xf8, 0x1f, 0x2c, 0x48, 0xd1, 0x4d, 0x18, 0xdc, 0x8a, 0xa2, 0xe6, - 0x35, 0xe2, 0xd4, 0x49, 0x20, 0x77, 0xd9, 0xf3, 0x59, 0xbb, 0x2c, 0xed, 0x04, 0x4e, 0x16, 0x6f, - 0x4c, 0x31, 0x2c, 0xc4, 0x3a, 0x1f, 0xbb, 0x0a, 0x10, 0xe3, 0x8e, 0xc8, 0x70, 0x63, 0xaf, 0x41, - 0x89, 0x7e, 0xee, 0x4c, 0xc3, 0x75, 0xda, 0x9b, 0xc6, 0x9f, 0x81, 0x92, 0x34, 0x7c, 0x87, 0x22, - 0x8a, 0x03, 0x3b, 0x91, 0xa4, 0x5d, 0x3c, 0xc4, 0x31, 0xde, 0x7e, 0x1c, 0x84, 0x6f, 0x69, 0x3b, - 0x96, 0xf6, 0x06, 0x9c, 0x60, 0x4e, 0xb2, 0x4e, 0xb4, 0x65, 0xcc, 0xd1, 0xce, 0x93, 0xe1, 0x59, - 0x71, 0xaf, 0xe3, 0x5f, 0x36, 0xa9, 0x3d, 0x4e, 0x1e, 0x92, 0x1c, 0xe3, 0x3b, 0x9e, 0xfd, 0x67, - 0x3d, 0xf0, 0xc8, 0x52, 0x35, 0x3f, 0xfc, 0xd0, 0xcb, 0x30, 0xc4, 0xc5, 0x45, 0x3a, 0x35, 0x9c, - 0x86, 0xa8, 0x57, 0x69, 0x40, 0xd7, 0x34, 0x1c, 0x36, 0x28, 0xd1, 0x39, 0x28, 0xba, 0x6f, 0x7b, - 0xc9, 0xa7, 0x7b, 0x4b, 0x6f, 0xac, 0x62, 0x0a, 0xa7, 0x68, 0x2a, 0x79, 0xf2, 0x2d, 0x5d, 0xa1, - 0x95, 0xf4, 0xf9, 0x1a, 0x8c, 0xb8, 0x61, 0x2d, 0x74, 0x97, 0x3c, 0xba, 0x4e, 0xb5, 0x95, 0xae, - 0x74, 0x0e, 0xb4, 0xd1, 0x0a, 0x8b, 0x13, 0xd4, 0xda, 0xf9, 0xd2, 0xdb, 0xb5, 0xf4, 0xda, 0x31, - 0xf8, 0x01, 0xdd, 0xfe, 0x9b, 0xec, 0xeb, 0x42, 0xa6, 0x82, 0x17, 0xdb, 0x3f, 0xff, 0xe0, 0x10, - 0x4b, 0x1c, 0xbd, 0xd0, 0xd5, 0xb6, 0x9c, 0xe6, 0x4c, 0x2b, 0xda, 0x9a, 0x77, 0xc3, 0x9a, 0xbf, - 0x4b, 0x82, 0x3d, 0x76, 0x17, 0x1f, 0x88, 0x2f, 0x74, 0x0a, 0x31, 0x77, 0x6d, 0xa6, 0x42, 0x29, - 0x71, 0xba, 0x0c, 0x9a, 0x81, 0x51, 0x09, 0xac, 0x92, 0x90, 0x1d, 0x01, 0x83, 0x8c, 0x8d, 0x7a, - 0x4c, 0x27, 0xc0, 0x8a, 0x49, 0x92, 0xde, 0x14, 0x70, 0xe1, 0x28, 0x04, 0xdc, 0x0f, 0xc2, 0xb0, - 0xeb, 0xb9, 0x91, 0xeb, 0x44, 0x3e, 0xb7, 0x1f, 0xf1, 0x6b, 0x37, 0x53, 0x30, 0x2f, 0xe9, 0x08, - 0x6c, 0xd2, 0xd9, 0xff, 0xb6, 0x07, 0xc6, 0xd9, 0xb0, 0xbd, 0x37, 0xc3, 0xbe, 0x9d, 0x66, 0xd8, - 0xcd, 0xf4, 0x0c, 0x3b, 0x0a, 0xc9, 0xfd, 0x81, 0xa7, 0xd9, 0x67, 0xa0, 0xa4, 0xde, 0x0f, 0xca, - 0x07, 0xc4, 0x56, 0xce, 0x03, 0xe2, 0xce, 0xa7, 0xb7, 0x74, 0x49, 0x2b, 0x66, 0xba, 0xa4, 0x7d, - 0xd9, 0x82, 0xd8, 0xb0, 0x80, 0xde, 0x80, 0x52, 0xd3, 0x67, 0x1e, 0xae, 0x81, 0x74, 0x1b, 0x7f, - 0xbc, 0xad, 0x65, 0x82, 0x47, 0x60, 0x0b, 0x78, 0x2f, 0x54, 0x64, 0x51, 0x1c, 0x73, 0x41, 0xd7, - 0xa1, 0xbf, 0x19, 0x90, 0x6a, 0xc4, 0xc2, 0x03, 0x75, 0xcf, 0x90, 0xcf, 0x1a, 0x5e, 0x10, 0x4b, - 0x0e, 0xf6, 0xbf, 0xb7, 0x60, 0x2c, 0x49, 0x8a, 0x3e, 0x0c, 0x3d, 0xe4, 0x2e, 0xa9, 0x89, 0xf6, - 0x66, 0x1e, 0xc5, 0xb1, 0x6a, 0x82, 0x77, 0x00, 0xfd, 0x8f, 0x59, 0x29, 0x74, 0x0d, 0xfa, 0xe9, - 0x39, 0x7c, 0x55, 0x85, 0xc2, 0x7b, 0x34, 0xef, 0x2c, 0x57, 0x02, 0x0d, 0x6f, 0x9c, 0x00, 0x61, - 0x59, 0x9c, 0xf9, 0x81, 0xd5, 0x9a, 0x55, 0x7a, 0xc5, 0x89, 0xda, 0xdd, 0xc4, 0xd7, 0xe6, 0x2a, - 0x9c, 0x48, 0x70, 0xe3, 0x7e, 0x60, 0x12, 0x88, 0x63, 
0x26, 0xf6, 0x2f, 0x58, 0x00, 0xdc, 0xed, - 0xcd, 0xf1, 0x36, 0xc9, 0x31, 0x68, 0xd3, 0xe7, 0xa1, 0x27, 0x6c, 0x92, 0x5a, 0x3b, 0xe7, 0xeb, - 0xb8, 0x3d, 0xd5, 0x26, 0xa9, 0xc5, 0x33, 0x8e, 0xfe, 0xc3, 0xac, 0xb4, 0xfd, 0xbd, 0x00, 0x23, - 0x31, 0xd9, 0x52, 0x44, 0x76, 0xd0, 0x73, 0x46, 0xd0, 0x91, 0x33, 0x89, 0xa0, 0x23, 0x25, 0x46, - 0xad, 0x29, 0x6e, 0x3f, 0x03, 0xc5, 0x1d, 0xe7, 0xae, 0xd0, 0xcc, 0x3d, 0xd3, 0xbe, 0x19, 0x94, - 0xff, 0xf4, 0x8a, 0x73, 0x97, 0x5f, 0x5e, 0x9f, 0x91, 0x2b, 0x64, 0xc5, 0xb9, 0xdb, 0xd1, 0x41, - 0x98, 0x56, 0xc2, 0xea, 0x72, 0x3d, 0xe1, 0xd1, 0xd5, 0x55, 0x5d, 0xae, 0x97, 0xac, 0xcb, 0xf5, - 0xba, 0xa8, 0xcb, 0xf5, 0xd0, 0x3d, 0xe8, 0x17, 0x0e, 0x97, 0x22, 0x2c, 0xd9, 0xe5, 0x2e, 0xea, - 0x13, 0xfe, 0x9a, 0xbc, 0xce, 0xcb, 0xf2, 0x72, 0x2e, 0xa0, 0x1d, 0xeb, 0x95, 0x15, 0xa2, 0xff, - 0xc7, 0x82, 0x11, 0xf1, 0x1b, 0x93, 0xb7, 0x5b, 0x24, 0x8c, 0x84, 0xf0, 0xfa, 0x81, 0xee, 0xdb, - 0x20, 0x0a, 0xf2, 0xa6, 0x7c, 0x40, 0x9e, 0x33, 0x26, 0xb2, 0x63, 0x8b, 0x12, 0xad, 0x40, 0x7f, - 0xdb, 0x82, 0x13, 0x3b, 0xce, 0x5d, 0x5e, 0x23, 0x87, 0x61, 0x27, 0x72, 0x7d, 0xe1, 0xb8, 0xf0, - 0xe1, 0xee, 0x86, 0x3f, 0x55, 0x9c, 0x37, 0x52, 0x5a, 0x29, 0x4f, 0x64, 0x91, 0x74, 0x6c, 0x6a, - 0x66, 0xbb, 0xa6, 0x36, 0x60, 0x40, 0xce, 0xb7, 0x87, 0xe9, 0xdd, 0xcd, 0xea, 0x11, 0x73, 0xed, - 0xa1, 0xd6, 0xf3, 0x19, 0x18, 0xd2, 0xe7, 0xd8, 0x43, 0xad, 0xeb, 0x6d, 0x98, 0xc8, 0x98, 0x4b, - 0x0f, 0xb5, 0xca, 0x3b, 0x70, 0x26, 0x77, 0x7e, 0x3c, 0x54, 0xef, 0xfc, 0xaf, 0x5a, 0xfa, 0x3e, - 0x78, 0x0c, 0x26, 0x8d, 0x39, 0xd3, 0xa4, 0x71, 0xbe, 0xfd, 0xca, 0xc9, 0xb1, 0x6b, 0xbc, 0xa5, - 0x37, 0x9a, 0xee, 0xea, 0xe8, 0x75, 0xe8, 0x6b, 0x50, 0x88, 0x74, 0xdb, 0xb5, 0x3b, 0xaf, 0xc8, - 0x58, 0x98, 0x64, 0xf0, 0x10, 0x0b, 0x0e, 0xf6, 0x2f, 0x5b, 0xd0, 0x73, 0x0c, 0x3d, 0x81, 0xcd, - 0x9e, 0x78, 0x2e, 0x97, 0xb5, 0x88, 0xd0, 0x3e, 0x8d, 0x9d, 0x3b, 0x0b, 0x77, 0x23, 0xe2, 0x85, - 0xec, 0x44, 0xce, 0xec, 0x98, 0x9f, 0xb2, 0x60, 0x62, 0xd9, 0x77, 0xea, 0xb3, 0x4e, 0xc3, 0xf1, - 0x6a, 0x24, 0x58, 0xf2, 0x36, 0x0f, 0xe5, 0x73, 0x5e, 0xe8, 0xe8, 0x73, 0x3e, 0x27, 0x5d, 0xb6, - 0x7a, 0xf2, 0xc7, 0x8f, 0x4a, 0xd2, 0xc9, 0x30, 0x4c, 0x86, 0x73, 0xf1, 0x16, 0x20, 0xbd, 0x95, - 0xe2, 0xe5, 0x15, 0x86, 0x7e, 0x97, 0xb7, 0x57, 0x0c, 0xe2, 0x93, 0xd9, 0x12, 0x6e, 0xea, 0xf3, - 0xb4, 0x37, 0x45, 0x1c, 0x80, 0x25, 0x23, 0xfb, 0x65, 0xc8, 0x0c, 0x9b, 0xd1, 0x59, 0x7b, 0x61, - 0x7f, 0x1c, 0xc6, 0x59, 0xc9, 0x43, 0x6a, 0x06, 0xec, 0x84, 0xce, 0x35, 0x23, 0x04, 0xa8, 0xfd, - 0x05, 0x0b, 0x46, 0x57, 0x13, 0x91, 0x11, 0x2f, 0x32, 0x2b, 0x6d, 0x86, 0xaa, 0xbf, 0xca, 0xa0, - 0x58, 0x60, 0x8f, 0x5c, 0x15, 0xf6, 0xd7, 0x16, 0xc4, 0x91, 0x6c, 0x8e, 0x41, 0x7c, 0x9b, 0x33, - 0xc4, 0xb7, 0x4c, 0x41, 0x56, 0x35, 0x27, 0x4f, 0x7a, 0x43, 0xd7, 0x55, 0x8c, 0xb7, 0x36, 0x32, - 0x6c, 0xcc, 0x86, 0x4f, 0xc5, 0x11, 0x33, 0x10, 0x9c, 0x8c, 0xfa, 0x66, 0xff, 0x7e, 0x01, 0x90, - 0xa2, 0xed, 0x3a, 0x06, 0x5d, 0xba, 0xc4, 0xd1, 0xc4, 0xa0, 0xdb, 0x05, 0xc4, 0xfc, 0x0c, 0x02, - 0xc7, 0x0b, 0x39, 0x5b, 0x57, 0x28, 0xff, 0x0e, 0xe7, 0xc4, 0x30, 0x25, 0x1f, 0xa5, 0x2d, 0xa7, - 0xb8, 0xe1, 0x8c, 0x1a, 0x34, 0xff, 0x91, 0xde, 0x6e, 0xfd, 0x47, 0xfa, 0x3a, 0xbc, 0xae, 0xfc, - 0x8a, 0x05, 0xc3, 0xaa, 0x9b, 0xde, 0x25, 0x3e, 0xf8, 0xaa, 0x3d, 0x39, 0x1b, 0x68, 0x45, 0x6b, - 0x32, 0x3b, 0x58, 0xbe, 0x83, 0xbd, 0x92, 0x75, 0x1a, 0xee, 0x3d, 0xa2, 0x62, 0x96, 0x96, 0xc5, - 0xab, 0x57, 0x01, 0x3d, 0xd8, 0x2f, 0x0f, 0xab, 0x7f, 0x3c, 0x26, 0x7b, 0x5c, 0x84, 0x6e, 0xc9, - 0xa3, 0x89, 0xa9, 0x88, 0x5e, 0x82, 0xde, 0xe6, 0x96, 0x13, 0x92, 0xc4, 0x5b, 
0xa5, 0xde, 0x0a, - 0x05, 0x1e, 0xec, 0x97, 0x47, 0x54, 0x01, 0x06, 0xc1, 0x9c, 0xba, 0xfb, 0xc8, 0x7e, 0xe9, 0xc9, - 0xd9, 0x31, 0xb2, 0xdf, 0x5f, 0x5a, 0xd0, 0xb3, 0xea, 0xd7, 0x8f, 0x63, 0x0b, 0x78, 0xcd, 0xd8, - 0x02, 0xce, 0xe6, 0xa5, 0xcb, 0xc8, 0x5d, 0xfd, 0x8b, 0x89, 0xd5, 0x7f, 0x3e, 0x97, 0x43, 0xfb, - 0x85, 0xbf, 0x03, 0x83, 0x2c, 0x09, 0x87, 0x78, 0x97, 0xf5, 0x82, 0xb1, 0xe0, 0xcb, 0x89, 0x05, - 0x3f, 0xaa, 0x91, 0x6a, 0x2b, 0xfd, 0x29, 0xe8, 0x17, 0x0f, 0x7d, 0x92, 0x8f, 0x8d, 0x05, 0x2d, - 0x96, 0x78, 0xfb, 0xc7, 0x8b, 0x60, 0x24, 0xfd, 0x40, 0xbf, 0x6a, 0xc1, 0x74, 0xc0, 0x1d, 0x80, - 0xeb, 0xf3, 0xad, 0xc0, 0xf5, 0x36, 0xab, 0xb5, 0x2d, 0x52, 0x6f, 0x35, 0x5c, 0x6f, 0x73, 0x69, - 0xd3, 0xf3, 0x15, 0x78, 0xe1, 0x2e, 0xa9, 0xb5, 0x98, 0x71, 0xae, 0x43, 0x86, 0x11, 0xe5, 0x48, - 0xff, 0xfc, 0xfd, 0xfd, 0xf2, 0x34, 0x3e, 0x14, 0x6f, 0x7c, 0xc8, 0xb6, 0xa0, 0xdf, 0xb1, 0xe0, - 0x32, 0xcf, 0x85, 0xd1, 0x7d, 0xfb, 0xdb, 0xdc, 0x96, 0x2b, 0x92, 0x55, 0xcc, 0x64, 0x8d, 0x04, - 0x3b, 0xb3, 0x1f, 0x14, 0x1d, 0x7a, 0xb9, 0x72, 0xb8, 0xba, 0xf0, 0x61, 0x1b, 0x67, 0xff, 0xc3, - 0x22, 0x0c, 0x8b, 0x08, 0x70, 0xe2, 0x0c, 0x78, 0xc9, 0x98, 0x12, 0x8f, 0x26, 0xa6, 0xc4, 0xb8, - 0x41, 0x7c, 0x34, 0xdb, 0x7f, 0x08, 0xe3, 0x74, 0x73, 0xbe, 0x46, 0x9c, 0x20, 0x5a, 0x27, 0x0e, - 0x77, 0x0b, 0x2b, 0x1e, 0x7a, 0xf7, 0x57, 0xfa, 0xc9, 0xe5, 0x24, 0x33, 0x9c, 0xe6, 0xff, 0xed, - 0x74, 0xe6, 0x78, 0x30, 0x96, 0x0a, 0xe2, 0xf7, 0x26, 0x94, 0xd4, 0x2b, 0x15, 0xb1, 0xe9, 0xb4, - 0x8f, 0x85, 0x99, 0xe4, 0xc0, 0xd5, 0x5f, 0xf1, 0x0b, 0xa9, 0x98, 0x9d, 0xfd, 0x77, 0x0b, 0x46, - 0x85, 0x7c, 0x10, 0x57, 0x61, 0xc0, 0x09, 0x43, 0x77, 0xd3, 0x23, 0xf5, 0x76, 0x1a, 0xca, 0x54, - 0x35, 0xec, 0xa5, 0xd0, 0x8c, 0x28, 0x89, 0x15, 0x0f, 0x74, 0x8d, 0x3b, 0xdf, 0xed, 0x92, 0x76, - 0xea, 0xc9, 0x14, 0x37, 0x90, 0xee, 0x79, 0xbb, 0x04, 0x8b, 0xf2, 0xe8, 0x93, 0xdc, 0x3b, 0xf2, - 0xba, 0xe7, 0xdf, 0xf1, 0xae, 0xfa, 0xbe, 0x8c, 0xf6, 0xd1, 0x1d, 0xc3, 0x71, 0xe9, 0x13, 0xa9, - 0x8a, 0x63, 0x93, 0x5b, 0x77, 0x51, 0x71, 0x3f, 0x0b, 0x2c, 0xf6, 0xbf, 0xf9, 0x28, 0x3c, 0x44, - 0x04, 0x46, 0x45, 0x78, 0x41, 0x09, 0x13, 0x7d, 0x97, 0x79, 0x95, 0x33, 0x4b, 0xc7, 0x8a, 0xf4, - 0xeb, 0x26, 0x0b, 0x9c, 0xe4, 0x69, 0xff, 0x8c, 0x05, 0xec, 0x81, 0xec, 0x31, 0xc8, 0x23, 0x1f, - 0x31, 0xe5, 0x91, 0xc9, 0xbc, 0x4e, 0xce, 0x11, 0x45, 0x5e, 0xe4, 0x33, 0xab, 0x12, 0xf8, 0x77, - 0xf7, 0x84, 0x4b, 0x4b, 0xe7, 0xfb, 0x87, 0xfd, 0xdf, 0x2d, 0xbe, 0x89, 0xc5, 0xe1, 0x04, 0x3e, - 0x07, 0x03, 0x35, 0xa7, 0xe9, 0xd4, 0x78, 0x86, 0xaa, 0x5c, 0x8d, 0x9e, 0x51, 0x68, 0x7a, 0x4e, - 0x94, 0xe0, 0x1a, 0x2a, 0x19, 0xa6, 0x72, 0x40, 0x82, 0x3b, 0x6a, 0xa5, 0x54, 0x95, 0x53, 0xdb, - 0x30, 0x6c, 0x30, 0x7b, 0xa8, 0xea, 0x8c, 0xcf, 0xf1, 0x23, 0x56, 0x85, 0x55, 0xdd, 0x81, 0x71, - 0x4f, 0xfb, 0x4f, 0x0f, 0x14, 0x79, 0xb9, 0x7c, 0xbc, 0xd3, 0x21, 0xca, 0x4e, 0x1f, 0xed, 0xed, - 0x6d, 0x82, 0x0d, 0x4e, 0x73, 0xb6, 0x7f, 0xc2, 0x82, 0xd3, 0x3a, 0xa1, 0xf6, 0xbc, 0xa7, 0x93, - 0x91, 0x64, 0x1e, 0x06, 0xfc, 0x26, 0x09, 0x9c, 0xc8, 0x0f, 0xc4, 0xa9, 0x71, 0x49, 0x76, 0xfa, - 0x0d, 0x01, 0x3f, 0x10, 0xf9, 0x16, 0x24, 0x77, 0x09, 0xc7, 0xaa, 0x24, 0xbd, 0x7d, 0xb2, 0xce, - 0x08, 0xc5, 0x43, 0x2e, 0xb6, 0x07, 0x30, 0x7b, 0x7b, 0x88, 0x05, 0xc6, 0xfe, 0x33, 0x8b, 0x4f, - 0x2c, 0xbd, 0xe9, 0xe8, 0x6d, 0x18, 0xdb, 0x71, 0xa2, 0xda, 0xd6, 0xc2, 0xdd, 0x66, 0xc0, 0x4d, - 0x4e, 0xb2, 0x9f, 0x9e, 0xe9, 0xd4, 0x4f, 0xda, 0x47, 0xc6, 0x0e, 0x9f, 0x2b, 0x09, 0x66, 0x38, - 0xc5, 0x1e, 0xad, 0xc3, 0x20, 0x83, 0xb1, 0x37, 0x8a, 0x61, 0x3b, 0xd1, 0x20, 0xaf, 0x36, 0xe5, - 0xb2, 
0xb0, 0x12, 0xf3, 0xc1, 0x3a, 0x53, 0xfb, 0xcb, 0x45, 0xbe, 0xda, 0x99, 0x28, 0xff, 0x14, - 0xf4, 0x37, 0xfd, 0xfa, 0xdc, 0xd2, 0x3c, 0x16, 0xa3, 0xa0, 0x8e, 0x91, 0x0a, 0x07, 0x63, 0x89, - 0x47, 0x97, 0x60, 0x40, 0xfc, 0x94, 0x26, 0x42, 0xb6, 0x37, 0x0b, 0xba, 0x10, 0x2b, 0x2c, 0x7a, - 0x1e, 0xa0, 0x19, 0xf8, 0xbb, 0x6e, 0x9d, 0xc5, 0x2c, 0x29, 0x9a, 0xde, 0x46, 0x15, 0x85, 0xc1, - 0x1a, 0x15, 0x7a, 0x15, 0x86, 0x5b, 0x5e, 0xc8, 0xc5, 0x11, 0x2d, 0x32, 0xb4, 0xf2, 0x83, 0xb9, - 0xa9, 0x23, 0xb1, 0x49, 0x8b, 0x66, 0xa0, 0x2f, 0x72, 0x98, 0xf7, 0x4c, 0x6f, 0xbe, 0x53, 0xf0, - 0x1a, 0xa5, 0xd0, 0x93, 0x21, 0xd1, 0x02, 0x58, 0x14, 0x44, 0x6f, 0xca, 0xe7, 0xc2, 0x7c, 0x63, - 0x17, 0xde, 0xf8, 0xdd, 0x1d, 0x02, 0xda, 0x63, 0x61, 0xe1, 0xe5, 0x6f, 0xf0, 0x42, 0xaf, 0x00, - 0x90, 0xbb, 0x11, 0x09, 0x3c, 0xa7, 0xa1, 0x7c, 0xde, 0x94, 0x5c, 0x30, 0xef, 0xaf, 0xfa, 0xd1, - 0xcd, 0x90, 0x2c, 0x28, 0x0a, 0xac, 0x51, 0xdb, 0xbf, 0x53, 0x02, 0x88, 0xe5, 0x76, 0x74, 0x2f, - 0xb5, 0x71, 0x3d, 0xdb, 0x5e, 0xd2, 0x3f, 0xba, 0x5d, 0x0b, 0x7d, 0x9f, 0x05, 0x83, 0x22, 0x34, - 0x0b, 0x1b, 0xa1, 0x42, 0xfb, 0x8d, 0xd3, 0x8c, 0x10, 0x43, 0x4b, 0xf0, 0x26, 0xbc, 0x20, 0x67, - 0xa8, 0x86, 0xe9, 0xd8, 0x0a, 0xbd, 0x62, 0xf4, 0x7e, 0x79, 0x55, 0x2c, 0x1a, 0x5d, 0xa9, 0xae, - 0x8a, 0x25, 0x76, 0x46, 0xe8, 0xb7, 0xc4, 0x9b, 0xc6, 0x2d, 0xb1, 0x27, 0xff, 0x3d, 0xa4, 0x21, - 0xbe, 0x76, 0xba, 0x20, 0xa2, 0x8a, 0x1e, 0x1b, 0xa1, 0x37, 0xff, 0x11, 0x9f, 0x76, 0x4f, 0xea, - 0x10, 0x17, 0xe1, 0x33, 0x30, 0x5a, 0x37, 0x85, 0x00, 0x31, 0x13, 0x9f, 0xcc, 0xe3, 0x9b, 0x90, - 0x19, 0xe2, 0x63, 0x3f, 0x81, 0xc0, 0x49, 0xc6, 0xa8, 0xc2, 0x43, 0x65, 0x2c, 0x79, 0x1b, 0xbe, - 0x78, 0x11, 0x62, 0xe7, 0x8e, 0xe5, 0x5e, 0x18, 0x91, 0x1d, 0x4a, 0x19, 0x9f, 0xee, 0xab, 0xa2, - 0x2c, 0x56, 0x5c, 0xd0, 0xeb, 0xd0, 0xc7, 0x5e, 0x71, 0x85, 0x93, 0x03, 0xf9, 0x1a, 0x67, 0x33, - 0x66, 0x60, 0xbc, 0x20, 0xd9, 0xdf, 0x10, 0x0b, 0x0e, 0xe8, 0x9a, 0x7c, 0x23, 0x19, 0x2e, 0x79, - 0x37, 0x43, 0xc2, 0xde, 0x48, 0x96, 0x66, 0x1f, 0x8f, 0x9f, 0x3f, 0x72, 0x78, 0x66, 0xca, 0x44, - 0xa3, 0x24, 0x95, 0xa2, 0xc4, 0x7f, 0x99, 0x89, 0x51, 0x44, 0x38, 0xca, 0x6c, 0x9e, 0x99, 0xad, - 0x31, 0xee, 0xce, 0x5b, 0x26, 0x0b, 0x9c, 0xe4, 0x49, 0x25, 0x52, 0xbe, 0xea, 0xc5, 0x9b, 0x92, - 0x4e, 0x7b, 0x07, 0xbf, 0x88, 0xb3, 0xd3, 0x88, 0x43, 0xb0, 0x28, 0x7f, 0xac, 0xe2, 0xc1, 0x94, - 0x07, 0x63, 0xc9, 0x25, 0xfa, 0x50, 0xc5, 0x91, 0x3f, 0xe9, 0x81, 0x11, 0x73, 0x4a, 0xa1, 0xcb, - 0x50, 0x12, 0x4c, 0x54, 0x36, 0x13, 0xb5, 0x4a, 0x56, 0x24, 0x02, 0xc7, 0x34, 0x2c, 0x89, 0x0d, - 0x2b, 0xae, 0x39, 0x11, 0xc7, 0x49, 0x6c, 0x14, 0x06, 0x6b, 0x54, 0xf4, 0x62, 0xb5, 0xee, 0xfb, - 0x91, 0x3a, 0x90, 0xd4, 0xbc, 0x9b, 0x65, 0x50, 0x2c, 0xb0, 0xf4, 0x20, 0xda, 0x26, 0x81, 0x47, - 0x1a, 0x66, 0x14, 0x71, 0x75, 0x10, 0x5d, 0xd7, 0x91, 0xd8, 0xa4, 0xa5, 0xc7, 0xa9, 0x1f, 0xb2, - 0x89, 0x2c, 0xae, 0x6f, 0xb1, 0x53, 0x76, 0x95, 0x3f, 0x2f, 0x97, 0x78, 0xf4, 0x71, 0x38, 0xad, - 0x22, 0x76, 0x61, 0x6e, 0xcd, 0x90, 0x35, 0xf6, 0x19, 0xda, 0x96, 0xd3, 0x73, 0xd9, 0x64, 0x38, - 0xaf, 0x3c, 0x7a, 0x0d, 0x46, 0x84, 0x88, 0x2f, 0x39, 0xf6, 0x9b, 0x1e, 0x46, 0xd7, 0x0d, 0x2c, - 0x4e, 0x50, 0xcb, 0x38, 0xe8, 0x4c, 0xca, 0x96, 0x1c, 0x06, 0xd2, 0x71, 0xd0, 0x75, 0x3c, 0x4e, - 0x95, 0x40, 0x33, 0x30, 0xca, 0x65, 0x30, 0xd7, 0xdb, 0xe4, 0x63, 0x22, 0x9e, 0x7c, 0xa9, 0x25, - 0x75, 0xc3, 0x44, 0xe3, 0x24, 0x3d, 0x7a, 0x19, 0x86, 0x9c, 0xa0, 0xb6, 0xe5, 0x46, 0xa4, 0x16, - 0xb5, 0x02, 0xfe, 0x16, 0x4c, 0x73, 0xd1, 0x9a, 0xd1, 0x70, 0xd8, 0xa0, 0xb4, 0xef, 0xc1, 0x44, - 0x46, 0xdc, 0x09, 0x3a, 0x71, 
0x9c, 0xa6, 0x2b, 0xbf, 0x29, 0xe1, 0x07, 0x3d, 0x53, 0x59, 0x92, - 0x5f, 0xa3, 0x51, 0xd1, 0xd9, 0xc9, 0xe2, 0x53, 0x68, 0x89, 0x57, 0xd5, 0xec, 0x5c, 0x94, 0x08, - 0x1c, 0xd3, 0xd8, 0xff, 0xa9, 0x00, 0xa3, 0x19, 0xb6, 0x15, 0x96, 0xfc, 0x33, 0x71, 0x49, 0x89, - 0x73, 0x7d, 0x9a, 0x61, 0xf5, 0x0b, 0x87, 0x08, 0xab, 0x5f, 0xec, 0x14, 0x56, 0xbf, 0xe7, 0x9d, - 0x84, 0xd5, 0x37, 0x7b, 0xac, 0xb7, 0xab, 0x1e, 0xcb, 0x08, 0xc5, 0xdf, 0x77, 0xc8, 0x50, 0xfc, - 0x46, 0xa7, 0xf7, 0x77, 0xd1, 0xe9, 0x3f, 0x52, 0x80, 0xb1, 0xa4, 0x2b, 0xe9, 0x31, 0xe8, 0x6d, - 0x5f, 0x37, 0xf4, 0xb6, 0x97, 0xba, 0x79, 0xa2, 0x9b, 0xab, 0xc3, 0xc5, 0x09, 0x1d, 0xee, 0xd3, - 0x5d, 0x71, 0x6b, 0xaf, 0xcf, 0xfd, 0xc9, 0x02, 0x9c, 0xcc, 0x7c, 0x23, 0x7c, 0x0c, 0x7d, 0x73, - 0xc3, 0xe8, 0x9b, 0xe7, 0xba, 0x7e, 0xbe, 0x9c, 0xdb, 0x41, 0xb7, 0x13, 0x1d, 0x74, 0xb9, 0x7b, - 0x96, 0xed, 0x7b, 0xe9, 0xeb, 0x45, 0x38, 0x9f, 0x59, 0x2e, 0x56, 0x7b, 0x2e, 0x1a, 0x6a, 0xcf, - 0xe7, 0x13, 0x6a, 0x4f, 0xbb, 0x7d, 0xe9, 0xa3, 0xd1, 0x83, 0x8a, 0x67, 0xbc, 0x2c, 0x18, 0xc1, - 0x03, 0xea, 0x40, 0x8d, 0x67, 0xbc, 0x8a, 0x11, 0x36, 0xf9, 0x7e, 0x3b, 0xe9, 0x3e, 0x7f, 0xdb, - 0x82, 0x33, 0x99, 0x63, 0x73, 0x0c, 0xba, 0xae, 0x55, 0x53, 0xd7, 0xf5, 0x54, 0xd7, 0xb3, 0x35, - 0x47, 0xf9, 0xf5, 0xd3, 0xbd, 0x39, 0xdf, 0xc2, 0x6e, 0xf2, 0x37, 0x60, 0xd0, 0xa9, 0xd5, 0x48, - 0x18, 0xae, 0xf8, 0x75, 0x15, 0x81, 0xfb, 0x39, 0x76, 0xcf, 0x8a, 0xc1, 0x07, 0xfb, 0xe5, 0xa9, - 0x24, 0x8b, 0x18, 0x8d, 0x75, 0x0e, 0xe8, 0x93, 0x30, 0x10, 0x8a, 0x73, 0x53, 0x8c, 0xfd, 0x0b, - 0x5d, 0x76, 0x8e, 0xb3, 0x4e, 0x1a, 0x66, 0xa8, 0x27, 0xa5, 0xa9, 0x50, 0x2c, 0xcd, 0xb0, 0x30, - 0x85, 0x23, 0x0d, 0x0b, 0xf3, 0x3c, 0xc0, 0xae, 0xba, 0x0c, 0x24, 0xf5, 0x0f, 0xda, 0x35, 0x41, - 0xa3, 0x42, 0x1f, 0x85, 0xb1, 0x90, 0xc7, 0x42, 0x9c, 0x6b, 0x38, 0x21, 0x7b, 0x6d, 0x23, 0x66, - 0x21, 0x0b, 0x27, 0x55, 0x4d, 0xe0, 0x70, 0x8a, 0x1a, 0x2d, 0xca, 0x5a, 0x59, 0xe0, 0x46, 0x3e, - 0x31, 0x2f, 0xc6, 0x35, 0x8a, 0xd4, 0xe3, 0x27, 0x92, 0xdd, 0xcf, 0x3a, 0x5e, 0x2b, 0x89, 0x3e, - 0x09, 0x40, 0xa7, 0x8f, 0xd0, 0x43, 0xf4, 0xe7, 0x6f, 0x9e, 0x74, 0x57, 0xa9, 0x67, 0x3a, 0x37, - 0xb3, 0x97, 0xb7, 0xf3, 0x8a, 0x09, 0xd6, 0x18, 0x22, 0x07, 0x86, 0xe3, 0x7f, 0x71, 0x66, 0xde, - 0x4b, 0xb9, 0x35, 0x24, 0x99, 0x33, 0x95, 0xf7, 0xbc, 0xce, 0x02, 0x9b, 0x1c, 0xed, 0x7f, 0x37, - 0x00, 0x8f, 0xb4, 0xd9, 0x86, 0xd1, 0x8c, 0x69, 0xea, 0x7d, 0x26, 0x79, 0x7f, 0x9f, 0xca, 0x2c, - 0x6c, 0x5c, 0xe8, 0x13, 0xb3, 0xbd, 0xf0, 0x8e, 0x67, 0xfb, 0x0f, 0x59, 0x9a, 0x66, 0x85, 0x3b, - 0x95, 0x7e, 0xe4, 0x90, 0xc7, 0xcb, 0x11, 0xaa, 0x5a, 0x36, 0x32, 0xf4, 0x15, 0xcf, 0x77, 0xdd, - 0x9c, 0xee, 0x15, 0x18, 0x5f, 0xcd, 0x0e, 0x00, 0xcc, 0x55, 0x19, 0x57, 0x0f, 0xfb, 0xfd, 0xc7, - 0x15, 0x0c, 0xf8, 0xf7, 0x2d, 0x38, 0x93, 0x02, 0xf3, 0x36, 0x90, 0x50, 0xc4, 0xa8, 0x5a, 0x7d, - 0xc7, 0x8d, 0x97, 0x0c, 0xf9, 0x37, 0x5c, 0x13, 0xdf, 0x70, 0x26, 0x97, 0x2e, 0xd9, 0xf4, 0x1f, - 0xfc, 0xa3, 0xf2, 0x04, 0xab, 0xc0, 0x24, 0xc4, 0xf9, 0x4d, 0x3f, 0xde, 0x8b, 0xff, 0x37, 0x27, - 0xf6, 0xf1, 0xd4, 0x32, 0x9c, 0x6f, 0xdf, 0xd5, 0x87, 0x7a, 0x9e, 0xfc, 0x7b, 0x16, 0x9c, 0x6b, - 0x1b, 0x03, 0xe7, 0x5b, 0x50, 0xce, 0xb5, 0x3f, 0x6f, 0xc1, 0xa3, 0x99, 0x25, 0x0c, 0xef, 0xb8, - 0xcb, 0x50, 0xaa, 0x25, 0xf2, 0xa1, 0xc6, 0xd1, 0x20, 0x54, 0x2e, 0xd4, 0x98, 0xc6, 0x70, 0x82, - 0x2b, 0x74, 0x74, 0x82, 0xfb, 0x0d, 0x0b, 0x52, 0x67, 0xd5, 0x31, 0x08, 0x4d, 0x4b, 0xa6, 0xd0, - 0xf4, 0x78, 0x37, 0xbd, 0x99, 0x23, 0x2f, 0xfd, 0xc5, 0x28, 0x9c, 0xca, 0x79, 0x5d, 0xb8, 0x0b, - 0xe3, 0x9b, 0x35, 0x62, 0x3e, 0x27, 0x6f, 0x17, 0x66, 
0xa9, 0xed, 0xdb, 0x73, 0x9e, 0x86, 0x36, - 0x45, 0x82, 0xd3, 0x55, 0xa0, 0xcf, 0x5b, 0x70, 0xc2, 0xb9, 0x13, 0x2e, 0x50, 0xe1, 0xd7, 0xad, - 0xcd, 0x36, 0xfc, 0xda, 0x36, 0x95, 0x2c, 0xe4, 0xb2, 0x7a, 0x31, 0x53, 0x21, 0x79, 0xbb, 0x9a, - 0xa2, 0x37, 0xaa, 0x67, 0x49, 0xc7, 0xb3, 0xa8, 0x70, 0x66, 0x5d, 0x08, 0x8b, 0xfc, 0x28, 0xf4, - 0x6a, 0xdd, 0x26, 0xe0, 0x41, 0xd6, 0x33, 0x50, 0x2e, 0xcd, 0x49, 0x0c, 0x56, 0x7c, 0xd0, 0xa7, - 0xa1, 0xb4, 0x29, 0xdf, 0x36, 0x67, 0x48, 0x8b, 0x71, 0x47, 0xb6, 0x7f, 0xf1, 0xcd, 0xbd, 0x0a, - 0x14, 0x11, 0x8e, 0x99, 0xa2, 0xd7, 0xa0, 0xe8, 0x6d, 0x84, 0xed, 0xf2, 0x76, 0x27, 0xdc, 0x47, - 0x79, 0x58, 0x91, 0xd5, 0xc5, 0x2a, 0xa6, 0x05, 0xd1, 0x35, 0x28, 0x06, 0xeb, 0x75, 0xa1, 0x4d, - 0xcf, 0x5c, 0xa4, 0x78, 0x76, 0x3e, 0xa7, 0x55, 0x8c, 0x13, 0x9e, 0x9d, 0xc7, 0x94, 0x05, 0xaa, - 0x40, 0x2f, 0x7b, 0x92, 0x27, 0x64, 0xb3, 0xcc, 0x5b, 0x68, 0x9b, 0xa7, 0xad, 0x3c, 0xf6, 0x08, - 0x23, 0xc0, 0x9c, 0x11, 0x5a, 0x83, 0xbe, 0x1a, 0xcb, 0xf1, 0x2c, 0x84, 0xb1, 0xf7, 0x67, 0xea, - 0xcd, 0xdb, 0x24, 0xbf, 0x16, 0x6a, 0x64, 0x46, 0x81, 0x05, 0x2f, 0xc6, 0x95, 0x34, 0xb7, 0x36, - 0x42, 0xa6, 0x77, 0xcb, 0xe3, 0xda, 0x26, 0xa7, 0xbb, 0xe0, 0xca, 0x28, 0xb0, 0xe0, 0x85, 0x5e, - 0x81, 0xc2, 0x46, 0x4d, 0x3c, 0xb7, 0xcb, 0x54, 0xa0, 0x9b, 0x91, 0x61, 0x66, 0xfb, 0xee, 0xef, - 0x97, 0x0b, 0x8b, 0x73, 0xb8, 0xb0, 0x51, 0x43, 0xab, 0xd0, 0xbf, 0xc1, 0x63, 0x49, 0x08, 0x1d, - 0xf9, 0x93, 0xd9, 0x61, 0x2e, 0x52, 0xe1, 0x26, 0xf8, 0xd3, 0x2d, 0x81, 0xc0, 0x92, 0x09, 0x4b, - 0xd7, 0xa1, 0x62, 0x62, 0x88, 0x90, 0x7c, 0xd3, 0x87, 0x8b, 0x63, 0xc2, 0x65, 0xe5, 0x38, 0xb2, - 0x06, 0xd6, 0x38, 0xd2, 0x59, 0xed, 0xdc, 0x6b, 0x05, 0x2c, 0x5e, 0xbb, 0x88, 0xdd, 0x94, 0x39, - 0xab, 0x67, 0x24, 0x51, 0xbb, 0x59, 0xad, 0x88, 0x70, 0xcc, 0x14, 0x6d, 0xc3, 0xf0, 0x6e, 0xd8, - 0xdc, 0x22, 0x72, 0x49, 0xb3, 0x50, 0x4e, 0x39, 0xb2, 0xde, 0x2d, 0x41, 0xe8, 0x06, 0x51, 0xcb, - 0x69, 0xa4, 0x76, 0x21, 0x26, 0x97, 0xdf, 0xd2, 0x99, 0x61, 0x93, 0x37, 0xed, 0xfe, 0xb7, 0x5b, - 0xfe, 0xfa, 0x5e, 0x44, 0x44, 0x24, 0xbd, 0xcc, 0xee, 0x7f, 0x83, 0x93, 0xa4, 0xbb, 0x5f, 0x20, - 0xb0, 0x64, 0x82, 0x6e, 0x89, 0xee, 0x61, 0xbb, 0xe7, 0x58, 0x7e, 0x98, 0xde, 0x19, 0x49, 0x94, - 0xd3, 0x29, 0x6c, 0xb7, 0x8c, 0x59, 0xb1, 0x5d, 0xb2, 0xb9, 0xe5, 0x47, 0xbe, 0x97, 0xd8, 0xa1, - 0xc7, 0xf3, 0x77, 0xc9, 0x4a, 0x06, 0x7d, 0x7a, 0x97, 0xcc, 0xa2, 0xc2, 0x99, 0x75, 0xa1, 0x3a, - 0x8c, 0x34, 0xfd, 0x20, 0xba, 0xe3, 0x07, 0x72, 0x7e, 0xa1, 0x36, 0x3a, 0x3e, 0x83, 0x52, 0xd4, - 0xc8, 0x82, 0x54, 0x9a, 0x18, 0x9c, 0xe0, 0x89, 0x3e, 0x06, 0xfd, 0x61, 0xcd, 0x69, 0x90, 0xa5, - 0x1b, 0x93, 0x13, 0xf9, 0xc7, 0x4f, 0x95, 0x93, 0xe4, 0xcc, 0x2e, 0x1e, 0x0a, 0x84, 0x93, 0x60, - 0xc9, 0x0e, 0x2d, 0x42, 0x2f, 0x4b, 0x83, 0xc9, 0xc2, 0x3e, 0xe6, 0x44, 0x1b, 0x4e, 0x39, 0xf3, - 0xf3, 0xbd, 0x89, 0x81, 0x31, 0x2f, 0x4e, 0xd7, 0x80, 0xb8, 0xea, 0xfa, 0xe1, 0xe4, 0xc9, 0xfc, - 0x35, 0x20, 0x6e, 0xc8, 0x37, 0xaa, 0xed, 0xd6, 0x80, 0x22, 0xc2, 0x31, 0x53, 0xba, 0x33, 0xd3, - 0xdd, 0xf4, 0x54, 0x1b, 0x2f, 0xb4, 0xdc, 0xbd, 0x94, 0xed, 0xcc, 0x74, 0x27, 0xa5, 0x2c, 0xec, - 0x3f, 0xee, 0x4f, 0xcb, 0x2c, 0x4c, 0x39, 0xf2, 0xbf, 0x5b, 0x29, 0xbb, 0xf9, 0x07, 0xba, 0xd5, - 0xd5, 0x1e, 0xe1, 0xb5, 0xee, 0xf3, 0x16, 0x9c, 0x6a, 0x66, 0x7e, 0x88, 0x10, 0x00, 0xba, 0x53, - 0xf9, 0xf2, 0x4f, 0x57, 0x21, 0x42, 0xb3, 0xf1, 0x38, 0xa7, 0xa6, 0xe4, 0xd5, 0xb9, 0xf8, 0x8e, - 0xaf, 0xce, 0x2b, 0x30, 0x50, 0xe3, 0xf7, 0x1c, 0x19, 0xda, 0xba, 0xab, 0x00, 0x77, 0x4c, 0x94, - 0x10, 0x17, 0xa4, 0x0d, 0xac, 0x58, 0xa0, 0x1f, 0xb6, 0xe0, 0x5c, 0xb2, 0xe9, 
0x98, 0x30, 0xb4, - 0x88, 0x2b, 0xca, 0xf5, 0x32, 0x8b, 0xe2, 0xfb, 0x53, 0xf2, 0xbf, 0x41, 0x7c, 0xd0, 0x89, 0x00, - 0xb7, 0xaf, 0x0c, 0xcd, 0x67, 0x28, 0x86, 0xfa, 0x4c, 0x63, 0x58, 0x17, 0xca, 0xa1, 0x17, 0x61, - 0x68, 0xc7, 0x6f, 0x79, 0x91, 0x70, 0x5a, 0x13, 0x0e, 0x34, 0xcc, 0x71, 0x64, 0x45, 0x83, 0x63, - 0x83, 0x2a, 0xa1, 0x52, 0x1a, 0x78, 0x60, 0x95, 0xd2, 0x5b, 0x30, 0xe4, 0x69, 0x5e, 0xd6, 0x42, - 0x1e, 0xb8, 0x98, 0x1f, 0x13, 0x58, 0xf7, 0xc9, 0xe6, 0xad, 0xd4, 0x21, 0xd8, 0xe0, 0x76, 0xbc, - 0xde, 0x6c, 0x3f, 0x5f, 0xc8, 0x10, 0xea, 0xb9, 0x5a, 0xe9, 0xc3, 0xa6, 0x5a, 0xe9, 0x62, 0x52, - 0xad, 0x94, 0x32, 0x84, 0x18, 0x1a, 0xa5, 0xee, 0x53, 0x64, 0x75, 0x1d, 0x57, 0xf4, 0xbb, 0x2d, - 0x38, 0xcd, 0x34, 0xeb, 0xb4, 0x82, 0x77, 0xac, 0x4d, 0x7f, 0xe4, 0xfe, 0x7e, 0xf9, 0xf4, 0x72, - 0x36, 0x3b, 0x9c, 0x57, 0x8f, 0xdd, 0x80, 0x0b, 0x9d, 0x8e, 0x46, 0xe6, 0x41, 0x59, 0x57, 0xa6, - 0xf7, 0xd8, 0x83, 0xb2, 0xbe, 0x34, 0x8f, 0x19, 0xa6, 0xdb, 0xa8, 0x59, 0xf6, 0x7f, 0xb0, 0xa0, - 0x58, 0xf1, 0xeb, 0xc7, 0x70, 0xe9, 0xfe, 0x88, 0x71, 0xe9, 0x7e, 0x24, 0xfb, 0x50, 0xae, 0xe7, - 0x9a, 0x92, 0x16, 0x12, 0xa6, 0xa4, 0x73, 0x79, 0x0c, 0xda, 0x1b, 0x8e, 0x7e, 0xaa, 0x08, 0x83, - 0x15, 0xbf, 0xae, 0x9e, 0x2f, 0xfc, 0xe3, 0x07, 0x79, 0xbe, 0x90, 0x9b, 0xf4, 0x44, 0xe3, 0xcc, - 0x1c, 0x2f, 0xe5, 0xcb, 0xed, 0x6f, 0xb1, 0x57, 0x0c, 0xb7, 0x89, 0xbb, 0xb9, 0x15, 0x91, 0x7a, - 0xf2, 0x73, 0x8e, 0xef, 0x15, 0xc3, 0x1f, 0x17, 0x60, 0x34, 0x51, 0x3b, 0x6a, 0xc0, 0x70, 0x43, - 0x37, 0x54, 0x88, 0x79, 0xfa, 0x40, 0x36, 0x0e, 0xe1, 0x05, 0xae, 0x81, 0xb0, 0xc9, 0x1c, 0x4d, - 0x03, 0x28, 0xcb, 0xbd, 0x54, 0x57, 0xb3, 0x9b, 0x87, 0x32, 0xed, 0x87, 0x58, 0xa3, 0x40, 0x2f, - 0xc1, 0x60, 0xe4, 0x37, 0xfd, 0x86, 0xbf, 0xb9, 0x77, 0x9d, 0xc8, 0x80, 0x6a, 0xca, 0xb7, 0x73, - 0x2d, 0x46, 0x61, 0x9d, 0x0e, 0xdd, 0x85, 0x71, 0xc5, 0xa4, 0x7a, 0x04, 0xc6, 0x1b, 0xa6, 0xd9, - 0x58, 0x4d, 0x72, 0xc4, 0xe9, 0x4a, 0xec, 0x9f, 0x2d, 0xf2, 0x2e, 0xf6, 0x22, 0xf7, 0xbd, 0xd5, - 0xf0, 0xee, 0x5e, 0x0d, 0x5f, 0xb7, 0x60, 0x8c, 0xd6, 0xce, 0x1c, 0xd7, 0xa4, 0xa8, 0xa1, 0x22, - 0xa1, 0x5b, 0x6d, 0x22, 0xa1, 0x5f, 0xa4, 0xbb, 0x66, 0xdd, 0x6f, 0x45, 0x42, 0x7f, 0xa8, 0x6d, - 0x8b, 0x14, 0x8a, 0x05, 0x56, 0xd0, 0x91, 0x20, 0x10, 0x8f, 0x6d, 0x75, 0x3a, 0x12, 0x04, 0x58, - 0x60, 0x65, 0xa0, 0xf4, 0x9e, 0xec, 0x40, 0xe9, 0x3c, 0xde, 0xad, 0x70, 0x71, 0x12, 0x42, 0x9f, - 0x16, 0xef, 0x56, 0xfa, 0x3e, 0xc5, 0x34, 0xf6, 0x57, 0x8b, 0x30, 0x54, 0xf1, 0xeb, 0xb1, 0xd5, - 0xfe, 0x45, 0xc3, 0x6a, 0x7f, 0x21, 0x61, 0xb5, 0x1f, 0xd3, 0x69, 0xdf, 0xb3, 0xd1, 0x7f, 0xb3, - 0x6c, 0xf4, 0xbf, 0x6e, 0xb1, 0x51, 0x9b, 0x5f, 0xad, 0x72, 0x3f, 0x48, 0x74, 0x05, 0x06, 0xd9, - 0x06, 0xc3, 0x5e, 0x77, 0x4b, 0x53, 0x36, 0x4b, 0x5c, 0xb6, 0x1a, 0x83, 0xb1, 0x4e, 0x83, 0x2e, - 0xc1, 0x40, 0x48, 0x9c, 0xa0, 0xb6, 0xa5, 0x76, 0x57, 0x61, 0x77, 0xe6, 0x30, 0xac, 0xb0, 0xe8, - 0x8d, 0x38, 0xd4, 0x6a, 0x31, 0xff, 0xb5, 0xa8, 0xde, 0x1e, 0xbe, 0x44, 0xf2, 0xe3, 0xab, 0xda, - 0xb7, 0x01, 0xa5, 0xe9, 0xbb, 0x08, 0x06, 0x58, 0x36, 0x83, 0x01, 0x96, 0x52, 0x81, 0x00, 0xff, - 0xca, 0x82, 0x91, 0x8a, 0x5f, 0xa7, 0x4b, 0xf7, 0xdb, 0x69, 0x9d, 0xea, 0x71, 0xa6, 0xfb, 0xda, - 0xc4, 0x99, 0x7e, 0x0c, 0x7a, 0x2b, 0x7e, 0xbd, 0x43, 0xc0, 0xc2, 0xbf, 0x61, 0x41, 0x7f, 0xc5, - 0xaf, 0x1f, 0x83, 0x69, 0xe2, 0xc3, 0xa6, 0x69, 0xe2, 0x74, 0xce, 0xbc, 0xc9, 0xb1, 0x46, 0xfc, - 0xff, 0x3d, 0x30, 0x4c, 0xdb, 0xe9, 0x6f, 0xca, 0xa1, 0x34, 0xba, 0xcd, 0xea, 0xa2, 0xdb, 0xa8, - 0x14, 0xee, 0x37, 0x1a, 0xfe, 0x9d, 0xe4, 0xb0, 0x2e, 0x32, 0x28, 0x16, 0x58, 0xf4, 0x2c, 0x0c, - 0x34, 
0x03, 0xb2, 0xeb, 0xfa, 0x42, 0xbc, 0xd5, 0x0c, 0x3d, 0x15, 0x01, 0xc7, 0x8a, 0x82, 0x5e, - 0x4d, 0x43, 0xd7, 0xa3, 0x47, 0x79, 0xcd, 0xf7, 0xea, 0x5c, 0x7b, 0x5f, 0x14, 0xc9, 0x50, 0x34, - 0x38, 0x36, 0xa8, 0xd0, 0x6d, 0x28, 0xb1, 0xff, 0x6c, 0xdb, 0x39, 0x7c, 0x1a, 0x66, 0x91, 0x1e, - 0x52, 0x30, 0xc0, 0x31, 0x2f, 0xf4, 0x3c, 0x40, 0x24, 0x13, 0x0a, 0x84, 0x22, 0x70, 0x9d, 0xba, - 0x0a, 0xa8, 0x54, 0x03, 0x21, 0xd6, 0xa8, 0xd0, 0x33, 0x50, 0x8a, 0x1c, 0xb7, 0xb1, 0xec, 0x7a, - 0xcc, 0xfe, 0x4b, 0xdb, 0x2f, 0xb2, 0x34, 0x0a, 0x20, 0x8e, 0xf1, 0x54, 0x14, 0x63, 0x41, 0x4d, - 0x78, 0x12, 0xfa, 0x01, 0x46, 0xcd, 0x44, 0xb1, 0x65, 0x05, 0xc5, 0x1a, 0x05, 0xda, 0x82, 0xb3, - 0xae, 0xc7, 0x12, 0x87, 0x90, 0xea, 0xb6, 0xdb, 0x5c, 0x5b, 0xae, 0xde, 0x22, 0x81, 0xbb, 0xb1, - 0x37, 0xeb, 0xd4, 0xb6, 0x89, 0x27, 0x13, 0xec, 0xca, 0xbc, 0xeb, 0x67, 0x97, 0xda, 0xd0, 0xe2, - 0xb6, 0x9c, 0xec, 0x17, 0xd8, 0x7c, 0xbf, 0x51, 0x45, 0x4f, 0x1b, 0x5b, 0xc7, 0x29, 0x7d, 0xeb, - 0x38, 0xd8, 0x2f, 0xf7, 0xdd, 0xa8, 0x6a, 0x31, 0x39, 0x5e, 0x86, 0x93, 0x15, 0xbf, 0x5e, 0xf1, - 0x83, 0x68, 0xd1, 0x0f, 0xee, 0x38, 0x41, 0x5d, 0x4e, 0xaf, 0xb2, 0x8c, 0x4a, 0x42, 0xf7, 0xcf, - 0x5e, 0xbe, 0xbb, 0x18, 0x11, 0x47, 0x5e, 0x60, 0x12, 0xdb, 0x21, 0xdf, 0xd2, 0xd5, 0x98, 0xec, - 0xa0, 0x52, 0xef, 0x5c, 0x75, 0x22, 0x82, 0x6e, 0xb0, 0x14, 0xfa, 0xf1, 0x31, 0x2a, 0x8a, 0x3f, - 0xa5, 0xa5, 0xd0, 0x8f, 0x91, 0x99, 0xe7, 0xae, 0x59, 0xde, 0xfe, 0x9c, 0xa8, 0x84, 0xeb, 0x01, - 0xb8, 0xbf, 0x62, 0x37, 0x39, 0xa8, 0x65, 0x6e, 0x8e, 0x42, 0x7e, 0x52, 0x07, 0x6e, 0x79, 0x6d, - 0x9b, 0x9b, 0xc3, 0xfe, 0x4e, 0x38, 0x95, 0xac, 0xbe, 0xeb, 0x44, 0xd8, 0x73, 0x30, 0x1e, 0xe8, - 0x05, 0xb5, 0x44, 0x67, 0x27, 0x79, 0x3e, 0x85, 0x04, 0x12, 0xa7, 0xe9, 0xed, 0x97, 0x60, 0x9c, - 0xde, 0x3d, 0x95, 0x20, 0xc7, 0x7a, 0xb9, 0x73, 0x78, 0x96, 0xff, 0xd8, 0xcb, 0x0e, 0xa2, 0x44, - 0xd6, 0x1b, 0xf4, 0x29, 0x18, 0x09, 0xc9, 0xb2, 0xeb, 0xb5, 0xee, 0x4a, 0xed, 0x53, 0x9b, 0x47, - 0xa4, 0xd5, 0x05, 0x9d, 0x92, 0xeb, 0xb0, 0x4d, 0x18, 0x4e, 0x70, 0x43, 0x3b, 0x30, 0x72, 0xc7, - 0xf5, 0xea, 0xfe, 0x9d, 0x50, 0xf2, 0x1f, 0xc8, 0x57, 0x65, 0xdf, 0xe6, 0x94, 0x89, 0x36, 0x1a, - 0xd5, 0xdd, 0x36, 0x98, 0xe1, 0x04, 0x73, 0xba, 0xd8, 0x83, 0x96, 0x37, 0x13, 0xde, 0x0c, 0x09, - 0x7f, 0x16, 0x28, 0x16, 0x3b, 0x96, 0x40, 0x1c, 0xe3, 0xe9, 0x62, 0x67, 0x7f, 0xae, 0x06, 0x7e, - 0x8b, 0xa7, 0x58, 0x11, 0x8b, 0x1d, 0x2b, 0x28, 0xd6, 0x28, 0xe8, 0x66, 0xc8, 0xfe, 0xad, 0xfa, - 0x1e, 0xf6, 0xfd, 0x48, 0x6e, 0x9f, 0x2c, 0x45, 0x98, 0x06, 0xc7, 0x06, 0x15, 0x5a, 0x04, 0x14, - 0xb6, 0x9a, 0xcd, 0x06, 0xf3, 0x4e, 0x73, 0x1a, 0x8c, 0x15, 0x77, 0xdb, 0x29, 0xf2, 0x10, 0xd1, - 0xd5, 0x14, 0x16, 0x67, 0x94, 0xa0, 0xe7, 0xe2, 0x86, 0x68, 0x6a, 0x2f, 0x6b, 0x2a, 0x37, 0x7b, - 0x55, 0x79, 0x3b, 0x25, 0x0e, 0x2d, 0x40, 0x7f, 0xb8, 0x17, 0xd6, 0xa2, 0x46, 0xd8, 0x2e, 0x21, - 0x5b, 0x95, 0x91, 0x68, 0xf9, 0x40, 0x79, 0x11, 0x2c, 0xcb, 0xa2, 0x1a, 0x4c, 0x08, 0x8e, 0x73, - 0x5b, 0x8e, 0xa7, 0xd2, 0x44, 0x71, 0x27, 0xfd, 0x2b, 0xf7, 0xf7, 0xcb, 0x13, 0xa2, 0x66, 0x1d, - 0x7d, 0xb0, 0x5f, 0xa6, 0x8b, 0x23, 0x03, 0x83, 0xb3, 0xb8, 0xf1, 0xc9, 0x57, 0xab, 0xf9, 0x3b, - 0xcd, 0x4a, 0xe0, 0x6f, 0xb8, 0x0d, 0xd2, 0xce, 0x74, 0x58, 0x35, 0x28, 0xc5, 0xe4, 0x33, 0x60, - 0x38, 0xc1, 0xcd, 0xfe, 0x1c, 0x93, 0x1d, 0xab, 0xee, 0xa6, 0xe7, 0x44, 0xad, 0x80, 0xa0, 0x1d, - 0x18, 0x6e, 0xb2, 0xdd, 0x45, 0x24, 0x3e, 0x11, 0x73, 0xfd, 0xc5, 0x2e, 0xd5, 0x4f, 0x77, 0x58, - 0xea, 0x36, 0xc3, 0xd5, 0xad, 0xa2, 0xb3, 0xc3, 0x26, 0x77, 0xfb, 0x5f, 0x9c, 0x61, 0xd2, 0x47, - 0x95, 0xeb, 0x94, 0xfa, 0xc5, 
0x9b, 0x20, 0x71, 0x8d, 0x9d, 0xca, 0x57, 0xb0, 0xc6, 0xc3, 0x22, - 0xde, 0x15, 0x61, 0x59, 0x16, 0x7d, 0x12, 0x46, 0xe8, 0xad, 0x50, 0x49, 0x00, 0xe1, 0xe4, 0x89, - 0xfc, 0xd8, 0x2d, 0x8a, 0x4a, 0x4f, 0x8a, 0xa4, 0x17, 0xc6, 0x09, 0x66, 0xe8, 0x0d, 0xe6, 0x5a, - 0x26, 0x59, 0x17, 0xba, 0x61, 0xad, 0x7b, 0x91, 0x49, 0xb6, 0x1a, 0x13, 0xd4, 0x82, 0x89, 0x74, - 0xea, 0xc7, 0x70, 0xd2, 0xce, 0x17, 0xaf, 0xd3, 0xd9, 0x1b, 0xe3, 0xec, 0x35, 0x69, 0x5c, 0x88, - 0xb3, 0xf8, 0xa3, 0xe5, 0x64, 0x62, 0xbe, 0xa2, 0xa1, 0xf7, 0x4d, 0x25, 0xe7, 0x1b, 0x6e, 0x9b, - 0x93, 0x6f, 0x13, 0xce, 0x69, 0xb9, 0xcd, 0xae, 0x06, 0x0e, 0x73, 0xde, 0x70, 0xd9, 0x76, 0xaa, - 0xc9, 0x45, 0x8f, 0xde, 0xdf, 0x2f, 0x9f, 0x5b, 0x6b, 0x47, 0x88, 0xdb, 0xf3, 0x41, 0x37, 0xe0, - 0x24, 0x8f, 0x3c, 0x30, 0x4f, 0x9c, 0x7a, 0xc3, 0xf5, 0x94, 0xe0, 0xc5, 0x97, 0xfc, 0x99, 0xfb, - 0xfb, 0xe5, 0x93, 0x33, 0x59, 0x04, 0x38, 0xbb, 0x1c, 0xfa, 0x30, 0x94, 0xea, 0x5e, 0x28, 0xfa, - 0xa0, 0xcf, 0x48, 0x1f, 0x57, 0x9a, 0x5f, 0xad, 0xaa, 0xef, 0x8f, 0xff, 0xe0, 0xb8, 0x00, 0xda, - 0xe4, 0xb6, 0x01, 0xa5, 0x2d, 0xea, 0x4f, 0x45, 0x5e, 0x4b, 0x2a, 0x54, 0x8d, 0xb7, 0xc7, 0xdc, - 0x28, 0xa6, 0x9e, 0xe4, 0x18, 0xcf, 0x92, 0x0d, 0xc6, 0xe8, 0x75, 0x40, 0x22, 0x4d, 0xc1, 0x4c, - 0x8d, 0x65, 0xd5, 0x61, 0x47, 0xe3, 0x80, 0xf9, 0x1a, 0xb6, 0x9a, 0xa2, 0xc0, 0x19, 0xa5, 0xd0, - 0x35, 0xba, 0xab, 0xe8, 0x50, 0xb1, 0x6b, 0xa9, 0x24, 0xa5, 0xf3, 0xa4, 0x19, 0x10, 0xe6, 0x63, - 0x66, 0x72, 0xc4, 0x89, 0x72, 0xa8, 0x0e, 0x67, 0x9d, 0x56, 0xe4, 0x33, 0xb3, 0x8b, 0x49, 0xba, - 0xe6, 0x6f, 0x13, 0x8f, 0x59, 0x3c, 0x07, 0x66, 0x2f, 0x50, 0xc9, 0x6e, 0xa6, 0x0d, 0x1d, 0x6e, - 0xcb, 0x85, 0x4a, 0xe4, 0x2a, 0x2b, 0x39, 0x98, 0xf1, 0xe4, 0x32, 0x32, 0x93, 0xbf, 0x04, 0x83, - 0x5b, 0x7e, 0x18, 0xad, 0x92, 0xe8, 0x8e, 0x1f, 0x6c, 0x8b, 0xb8, 0xc8, 0x71, 0x2c, 0xfa, 0x18, - 0x85, 0x75, 0x3a, 0x7a, 0xe5, 0x66, 0xfe, 0x38, 0x4b, 0xf3, 0xcc, 0x15, 0x62, 0x20, 0xde, 0x63, - 0xae, 0x71, 0x30, 0x96, 0x78, 0x49, 0xba, 0x54, 0x99, 0x63, 0x6e, 0x0d, 0x09, 0xd2, 0xa5, 0xca, - 0x1c, 0x96, 0x78, 0x3a, 0x5d, 0xc3, 0x2d, 0x27, 0x20, 0x95, 0xc0, 0xaf, 0x91, 0x50, 0xcb, 0x80, - 0xf0, 0x08, 0x8f, 0xfa, 0x4c, 0xa7, 0x6b, 0x35, 0x8b, 0x00, 0x67, 0x97, 0x43, 0x24, 0x9d, 0xd7, - 0x6f, 0x24, 0xdf, 0x1e, 0x95, 0x96, 0x67, 0xba, 0x4c, 0xed, 0xe7, 0xc1, 0x98, 0xca, 0x28, 0xc8, - 0xe3, 0x3c, 0x87, 0x93, 0xa3, 0x6c, 0x6e, 0x77, 0x1f, 0x24, 0x5a, 0x59, 0xf8, 0x96, 0x12, 0x9c, - 0x70, 0x8a, 0xb7, 0x11, 0x32, 0x70, 0xac, 0x63, 0xc8, 0xc0, 0xcb, 0x50, 0x0a, 0x5b, 0xeb, 0x75, - 0x7f, 0xc7, 0x71, 0x3d, 0xe6, 0xd6, 0xa0, 0xdd, 0xfd, 0xaa, 0x12, 0x81, 0x63, 0x1a, 0xb4, 0x08, - 0x03, 0x8e, 0x34, 0xdf, 0xa1, 0xfc, 0x20, 0x51, 0xca, 0x68, 0xc7, 0xe3, 0xa6, 0x48, 0x83, 0x9d, - 0x2a, 0x8b, 0x5e, 0x85, 0x61, 0xf1, 0x72, 0x5e, 0x24, 0xe1, 0x9d, 0x30, 0x9f, 0x37, 0x56, 0x75, - 0x24, 0x36, 0x69, 0xd1, 0x4d, 0x18, 0x8c, 0xfc, 0x06, 0x7b, 0xa3, 0x47, 0xc5, 0xbc, 0x53, 0xf9, - 0xe1, 0x0e, 0xd7, 0x14, 0x99, 0xae, 0xb5, 0x56, 0x45, 0xb1, 0xce, 0x07, 0xad, 0xf1, 0xf9, 0xce, - 0xf2, 0x1d, 0x90, 0x50, 0x64, 0x71, 0x3d, 0x97, 0xe7, 0x93, 0xc6, 0xc8, 0xcc, 0xe5, 0x20, 0x4a, - 0x62, 0x9d, 0x0d, 0xba, 0x0a, 0xe3, 0xcd, 0xc0, 0xf5, 0xd9, 0x9c, 0x50, 0x96, 0xdb, 0x49, 0x33, - 0xbb, 0x59, 0x25, 0x49, 0x80, 0xd3, 0x65, 0x58, 0xe0, 0x03, 0x01, 0x9c, 0x3c, 0xc3, 0x33, 0xb4, - 0xf0, 0xab, 0x34, 0x87, 0x61, 0x85, 0x45, 0x2b, 0x6c, 0x27, 0xe6, 0x5a, 0xa0, 0xc9, 0xa9, 0xfc, - 0xb8, 0x54, 0xba, 0xb6, 0x88, 0x0b, 0xaf, 0xea, 0x2f, 0x8e, 0x39, 0xa0, 0xba, 0x96, 0x18, 0x95, - 0x5e, 0x01, 0xc2, 0xc9, 0xb3, 0x6d, 0x9c, 0x22, 0x13, 
0xb7, 0xb2, 0x58, 0x20, 0x30, 0xc0, 0x21, - 0x4e, 0xf0, 0x44, 0x1f, 0x85, 0x31, 0x11, 0x4d, 0x33, 0xee, 0xa6, 0x73, 0xf1, 0xcb, 0x07, 0x9c, - 0xc0, 0xe1, 0x14, 0x35, 0xcf, 0x90, 0xe2, 0xac, 0x37, 0x88, 0xd8, 0xfa, 0x96, 0x5d, 0x6f, 0x3b, - 0x9c, 0x3c, 0xcf, 0xf6, 0x07, 0x91, 0x21, 0x25, 0x89, 0xc5, 0x19, 0x25, 0xd0, 0x1a, 0x8c, 0x35, - 0x03, 0x42, 0x76, 0x98, 0xa0, 0x2f, 0xce, 0xb3, 0x32, 0x8f, 0xfb, 0x41, 0x5b, 0x52, 0x49, 0xe0, - 0x0e, 0x32, 0x60, 0x38, 0xc5, 0x01, 0xdd, 0x81, 0x01, 0x7f, 0x97, 0x04, 0x5b, 0xc4, 0xa9, 0x4f, - 0x5e, 0x68, 0xf3, 0x12, 0x47, 0x1c, 0x6e, 0x37, 0x04, 0x6d, 0xc2, 0xdb, 0x43, 0x82, 0x3b, 0x7b, - 0x7b, 0xc8, 0xca, 0xd0, 0xff, 0x61, 0xc1, 0x19, 0x69, 0x9c, 0xa9, 0x36, 0x69, 0xaf, 0xcf, 0xf9, - 0x5e, 0x18, 0x05, 0x3c, 0x52, 0xc5, 0xa3, 0xf9, 0xd1, 0x1b, 0xd6, 0x72, 0x0a, 0x29, 0x45, 0xf4, - 0x99, 0x3c, 0x8a, 0x10, 0xe7, 0xd7, 0x48, 0xaf, 0xa6, 0x21, 0x89, 0xe4, 0x66, 0x34, 0x13, 0x2e, - 0xbe, 0x31, 0xbf, 0x3a, 0xf9, 0x18, 0x0f, 0xb3, 0x41, 0x17, 0x43, 0x35, 0x89, 0xc4, 0x69, 0x7a, - 0x74, 0x05, 0x0a, 0x7e, 0x38, 0xf9, 0x78, 0x9b, 0x5c, 0xba, 0x7e, 0xfd, 0x46, 0x95, 0x7b, 0xfd, - 0xdd, 0xa8, 0xe2, 0x82, 0x1f, 0xca, 0x2c, 0x25, 0xf4, 0x3e, 0x16, 0x4e, 0x3e, 0xc1, 0xd5, 0x96, - 0x32, 0x4b, 0x09, 0x03, 0xe2, 0x18, 0x8f, 0xb6, 0x60, 0x34, 0x34, 0xee, 0xbd, 0xe1, 0xe4, 0x45, - 0xd6, 0x53, 0x4f, 0xe4, 0x0d, 0x9a, 0x41, 0xad, 0xa5, 0x0f, 0x30, 0xb9, 0xe0, 0x24, 0x5b, 0xbe, - 0xba, 0xb4, 0x9b, 0x77, 0x38, 0xf9, 0x64, 0x87, 0xd5, 0xa5, 0x11, 0xeb, 0xab, 0x4b, 0xe7, 0x81, - 0x13, 0x3c, 0xa7, 0xbe, 0x03, 0xc6, 0x53, 0xe2, 0xd2, 0x61, 0x3c, 0xdc, 0xa7, 0xb6, 0x61, 0xd8, - 0x98, 0x92, 0x0f, 0xd5, 0xbb, 0xe2, 0xb7, 0x4b, 0x50, 0x52, 0x56, 0x6f, 0x74, 0xd9, 0x74, 0xa8, - 0x38, 0x93, 0x74, 0xa8, 0x18, 0xa8, 0xf8, 0x75, 0xc3, 0x87, 0x62, 0x2d, 0x23, 0x18, 0x63, 0xde, - 0x06, 0xd8, 0xfd, 0x23, 0x15, 0xcd, 0x94, 0x50, 0xec, 0xda, 0x33, 0xa3, 0xa7, 0xad, 0x75, 0xe2, - 0x2a, 0x8c, 0x7b, 0x3e, 0x93, 0xd1, 0x49, 0x5d, 0x0a, 0x60, 0x4c, 0xce, 0x2a, 0xe9, 0xd1, 0x8d, - 0x12, 0x04, 0x38, 0x5d, 0x86, 0x56, 0xc8, 0x05, 0xa5, 0xa4, 0x39, 0x84, 0xcb, 0x51, 0x58, 0x60, - 0xe9, 0xdd, 0x90, 0xff, 0x0a, 0x27, 0xc7, 0xf2, 0xef, 0x86, 0xbc, 0x50, 0x52, 0x18, 0x0b, 0xa5, - 0x30, 0xc6, 0xb4, 0xff, 0x4d, 0xbf, 0xbe, 0x54, 0x11, 0x62, 0xbe, 0x16, 0x49, 0xb8, 0xbe, 0x54, - 0xc1, 0x1c, 0x87, 0x66, 0xa0, 0x8f, 0xfd, 0x08, 0x27, 0x87, 0xf2, 0xa3, 0xe1, 0xb0, 0x12, 0x5a, - 0x96, 0x34, 0x56, 0x00, 0x8b, 0x82, 0x4c, 0xbb, 0x4b, 0xef, 0x46, 0x4c, 0xbb, 0xdb, 0xff, 0x80, - 0xda, 0x5d, 0xc9, 0x00, 0xc7, 0xbc, 0xd0, 0x5d, 0x38, 0x69, 0xdc, 0x47, 0xd5, 0xab, 0x1d, 0xc8, - 0x37, 0xfc, 0x26, 0x88, 0x67, 0xcf, 0x89, 0x46, 0x9f, 0x5c, 0xca, 0xe2, 0x84, 0xb3, 0x2b, 0x40, - 0x0d, 0x18, 0xaf, 0xa5, 0x6a, 0x1d, 0xe8, 0xbe, 0x56, 0x35, 0x2f, 0xd2, 0x35, 0xa6, 0x19, 0xa3, - 0x57, 0x61, 0xe0, 0x6d, 0x3f, 0x64, 0x47, 0xa4, 0xb8, 0x9a, 0xc8, 0x70, 0x0e, 0x03, 0x6f, 0xdc, - 0xa8, 0x32, 0xf8, 0xc1, 0x7e, 0x79, 0xb0, 0xe2, 0xd7, 0xe5, 0x5f, 0xac, 0x0a, 0xa0, 0xef, 0xb7, - 0x60, 0x2a, 0x7d, 0xe1, 0x55, 0x8d, 0x1e, 0xee, 0xbe, 0xd1, 0xb6, 0xa8, 0x74, 0x6a, 0x21, 0x97, - 0x1d, 0x6e, 0x53, 0x15, 0xfa, 0x10, 0x5d, 0x4f, 0xa1, 0x7b, 0x8f, 0x88, 0x14, 0xb3, 0x8f, 0xc6, - 0xeb, 0x89, 0x42, 0x0f, 0xf6, 0xcb, 0xa3, 0x7c, 0x67, 0x74, 0xef, 0xc9, 0xe7, 0x4d, 0xa2, 0x00, - 0xfa, 0x4e, 0x38, 0x19, 0xa4, 0x35, 0xa8, 0x44, 0x0a, 0xe1, 0x4f, 0x77, 0xb3, 0xcb, 0x26, 0x07, - 0x1c, 0x67, 0x31, 0xc4, 0xd9, 0xf5, 0xd8, 0xbf, 0x62, 0x31, 0xfd, 0xb6, 0x68, 0x16, 0x09, 0x5b, - 0x8d, 0xe3, 0x48, 0x6c, 0xbd, 0x60, 0xd8, 0x8e, 0x1f, 0xd8, 0xb1, 0xe8, 0x1f, 
0x59, 0xcc, 0xb1, - 0xe8, 0x18, 0x5f, 0x31, 0xbd, 0x01, 0x03, 0x91, 0x4c, 0x38, 0xde, 0x26, 0x17, 0xb7, 0xd6, 0x28, - 0xe6, 0x5c, 0xa5, 0x2e, 0x39, 0x2a, 0xb7, 0xb8, 0x62, 0x63, 0xff, 0x7d, 0x3e, 0x02, 0x12, 0x73, - 0x0c, 0x26, 0xba, 0x79, 0xd3, 0x44, 0x57, 0xee, 0xf0, 0x05, 0x39, 0xa6, 0xba, 0xbf, 0x67, 0xb6, - 0x9b, 0x29, 0xf7, 0xde, 0xed, 0x1e, 0x6d, 0xf6, 0x17, 0x2c, 0x80, 0x38, 0xc8, 0x7c, 0x17, 0x29, - 0x25, 0x5f, 0xa6, 0xd7, 0x1a, 0x3f, 0xf2, 0x6b, 0x7e, 0x43, 0x18, 0x28, 0xce, 0xc6, 0x56, 0x42, - 0x0e, 0x3f, 0xd0, 0x7e, 0x63, 0x45, 0x8d, 0xca, 0x32, 0xa4, 0x65, 0x31, 0xb6, 0x5b, 0x1b, 0xe1, - 0x2c, 0xbf, 0x64, 0xc1, 0x89, 0x2c, 0x97, 0x78, 0x7a, 0x49, 0xe6, 0x6a, 0x4e, 0xe5, 0x6d, 0xa8, - 0x46, 0xf3, 0x96, 0x80, 0x63, 0x45, 0xd1, 0x75, 0xae, 0xce, 0xc3, 0x45, 0x77, 0xbf, 0x01, 0xc3, - 0x95, 0x80, 0x68, 0xf2, 0xc5, 0x6b, 0x3c, 0x4c, 0x0a, 0x6f, 0xcf, 0xb3, 0x87, 0x0e, 0x91, 0x62, - 0x7f, 0xb9, 0x00, 0x27, 0xb8, 0xd3, 0xce, 0xcc, 0xae, 0xef, 0xd6, 0x2b, 0x7e, 0x5d, 0x3c, 0x64, - 0x7c, 0x13, 0x86, 0x9a, 0x9a, 0x6e, 0xba, 0x5d, 0xa4, 0x62, 0x5d, 0x87, 0x1d, 0x6b, 0xd3, 0x74, - 0x28, 0x36, 0x78, 0xa1, 0x3a, 0x0c, 0x91, 0x5d, 0xb7, 0xa6, 0x3c, 0x3f, 0x0a, 0x87, 0x3e, 0xa4, - 0x55, 0x2d, 0x0b, 0x1a, 0x1f, 0x6c, 0x70, 0x7d, 0x08, 0x19, 0xf4, 0xed, 0x1f, 0xb5, 0xe0, 0x74, - 0x4e, 0x5c, 0x63, 0x5a, 0xdd, 0x1d, 0xe6, 0x1e, 0x25, 0xa6, 0xad, 0xaa, 0x8e, 0x3b, 0x4d, 0x61, - 0x81, 0x45, 0x1f, 0x03, 0xe0, 0x4e, 0x4f, 0xc4, 0xab, 0x75, 0x0c, 0x00, 0x6b, 0xc4, 0xae, 0xd4, - 0xc2, 0x10, 0xca, 0xf2, 0x58, 0xe3, 0x65, 0x7f, 0xa9, 0x07, 0x7a, 0x99, 0x93, 0x0d, 0xaa, 0x40, - 0xff, 0x16, 0xcf, 0x54, 0xd5, 0x76, 0xdc, 0x28, 0xad, 0x4c, 0x7e, 0x15, 0x8f, 0x9b, 0x06, 0xc5, - 0x92, 0x0d, 0x5a, 0x81, 0x09, 0x9e, 0x30, 0xac, 0x31, 0x4f, 0x1a, 0xce, 0x9e, 0x54, 0xfb, 0xf2, - 0x1c, 0xd8, 0x4a, 0xfd, 0xbd, 0x94, 0x26, 0xc1, 0x59, 0xe5, 0xd0, 0x6b, 0x30, 0x42, 0xaf, 0xe1, - 0x7e, 0x2b, 0x92, 0x9c, 0x78, 0xaa, 0x30, 0x75, 0x33, 0x59, 0x33, 0xb0, 0x38, 0x41, 0x8d, 0x5e, - 0x85, 0xe1, 0x66, 0x4a, 0xc1, 0xdd, 0x1b, 0x6b, 0x82, 0x4c, 0xa5, 0xb6, 0x49, 0xcb, 0xbc, 0xe2, - 0x5b, 0xec, 0x0d, 0xc0, 0xda, 0x56, 0x40, 0xc2, 0x2d, 0xbf, 0x51, 0x67, 0x12, 0x70, 0xaf, 0xe6, - 0x15, 0x9f, 0xc0, 0xe3, 0x54, 0x09, 0xca, 0x65, 0xc3, 0x71, 0x1b, 0xad, 0x80, 0xc4, 0x5c, 0xfa, - 0x4c, 0x2e, 0x8b, 0x09, 0x3c, 0x4e, 0x95, 0xe8, 0xac, 0xb9, 0xef, 0x3f, 0x1a, 0xcd, 0xbd, 0xfd, - 0xd3, 0x05, 0x30, 0x86, 0xf6, 0xdb, 0x37, 0x85, 0x19, 0xfd, 0xb2, 0xcd, 0xa0, 0x59, 0x13, 0x0e, - 0x65, 0x99, 0x5f, 0x16, 0xe7, 0x2f, 0xe6, 0x5f, 0x46, 0xff, 0x63, 0x56, 0x8a, 0xae, 0xf1, 0x93, - 0x95, 0xc0, 0xa7, 0x87, 0x9c, 0x0c, 0xa4, 0xa7, 0x1e, 0x9f, 0xf4, 0xcb, 0x20, 0x03, 0x6d, 0x42, - 0xce, 0x0a, 0xf7, 0x7c, 0xce, 0xc1, 0xf0, 0xbd, 0xaa, 0x8a, 0x68, 0x1f, 0x92, 0x0b, 0xba, 0x02, - 0x83, 0x22, 0x2f, 0x15, 0x7b, 0x23, 0xc1, 0x17, 0x13, 0xf3, 0x15, 0x9b, 0x8f, 0xc1, 0x58, 0xa7, - 0xb1, 0x7f, 0xa0, 0x00, 0x13, 0x19, 0x8f, 0xdc, 0xf8, 0x31, 0xb2, 0xe9, 0x86, 0x91, 0x4a, 0x91, - 0xac, 0x1d, 0x23, 0x1c, 0x8e, 0x15, 0x05, 0xdd, 0xab, 0xf8, 0x41, 0x95, 0x3c, 0x9c, 0xc4, 0x23, - 0x12, 0x81, 0x3d, 0x64, 0xb2, 0xe1, 0x0b, 0xd0, 0xd3, 0x0a, 0x89, 0x0c, 0x16, 0xad, 0x8e, 0x6d, - 0x66, 0xd6, 0x66, 0x18, 0x7a, 0x05, 0xdc, 0x54, 0x16, 0x62, 0xed, 0x0a, 0xc8, 0x6d, 0xc4, 0x1c, - 0x47, 0x1b, 0x17, 0x11, 0xcf, 0xf1, 0x22, 0x71, 0x51, 0x8c, 0xa3, 0x9e, 0x32, 0x28, 0x16, 0x58, - 0xfb, 0x8b, 0x45, 0x38, 0x93, 0xfb, 0xec, 0x95, 0x36, 0x7d, 0xc7, 0xf7, 0xdc, 0xc8, 0x57, 0x4e, - 0x78, 0x3c, 0xd2, 0x29, 0x69, 0x6e, 0xad, 0x08, 0x38, 0x56, 0x14, 0xe8, 0x22, 0xf4, 0x32, 0xa5, - 0x78, 
0x2a, 0x59, 0xf4, 0xec, 0x3c, 0x0f, 0x7d, 0xc7, 0xd1, 0x5d, 0xe7, 0xf7, 0x7f, 0x8c, 0x4a, - 0x30, 0x7e, 0x23, 0x79, 0xa0, 0xd0, 0xe6, 0xfa, 0x7e, 0x03, 0x33, 0x24, 0x7a, 0x42, 0xf4, 0x57, - 0xc2, 0xeb, 0x0c, 0x3b, 0x75, 0x3f, 0xd4, 0x3a, 0xed, 0x29, 0xe8, 0xdf, 0x26, 0x7b, 0x81, 0xeb, - 0x6d, 0x26, 0xbd, 0x11, 0xaf, 0x73, 0x30, 0x96, 0x78, 0x33, 0x6f, 0x69, 0xff, 0x51, 0x27, 0xe6, - 0x1f, 0xe8, 0x28, 0x9e, 0xfc, 0x50, 0x11, 0x46, 0xf1, 0xec, 0xfc, 0x7b, 0x03, 0x71, 0x33, 0x3d, - 0x10, 0x47, 0x9d, 0x98, 0xbf, 0xf3, 0x68, 0xfc, 0xa2, 0x05, 0xa3, 0x2c, 0x3b, 0x96, 0x88, 0x59, - 0xe1, 0xfa, 0xde, 0x31, 0x5c, 0x05, 0x1e, 0x83, 0xde, 0x80, 0x56, 0x9a, 0xcc, 0x12, 0xcd, 0x5a, - 0x82, 0x39, 0x0e, 0x9d, 0x85, 0x1e, 0xd6, 0x04, 0x3a, 0x78, 0x43, 0x7c, 0x0b, 0x9e, 0x77, 0x22, - 0x07, 0x33, 0x28, 0x0b, 0xfc, 0x86, 0x49, 0xb3, 0xe1, 0xf2, 0x46, 0xc7, 0x2e, 0x0b, 0xef, 0x8e, - 0x80, 0x18, 0x99, 0x4d, 0x7b, 0x67, 0x81, 0xdf, 0xb2, 0x59, 0xb6, 0xbf, 0x66, 0xff, 0x79, 0x01, - 0xce, 0x67, 0x96, 0xeb, 0x3a, 0xf0, 0x5b, 0xfb, 0xd2, 0x0f, 0x33, 0xff, 0x51, 0xf1, 0x18, 0x7d, - 0xbd, 0x7b, 0xba, 0x95, 0xfe, 0x7b, 0xbb, 0x88, 0xc7, 0x96, 0xd9, 0x65, 0xef, 0x92, 0x78, 0x6c, - 0x99, 0x6d, 0xcb, 0x51, 0x13, 0xfc, 0x75, 0x21, 0xe7, 0x5b, 0x98, 0xc2, 0xe0, 0x12, 0xdd, 0x67, - 0x18, 0x32, 0x94, 0x97, 0x70, 0xbe, 0xc7, 0x70, 0x18, 0x56, 0x58, 0x34, 0x03, 0xa3, 0x3b, 0xae, - 0x47, 0x37, 0x9f, 0x3d, 0x53, 0x14, 0x57, 0xb6, 0x8c, 0x15, 0x13, 0x8d, 0x93, 0xf4, 0xc8, 0xd5, - 0x62, 0xb5, 0xf1, 0xaf, 0x7b, 0xf5, 0x50, 0xab, 0x6e, 0xda, 0x74, 0xe7, 0x50, 0xbd, 0x98, 0x11, - 0xb7, 0x6d, 0x45, 0xd3, 0x13, 0x15, 0xbb, 0xd7, 0x13, 0x0d, 0x65, 0xeb, 0x88, 0xa6, 0x5e, 0x85, - 0xe1, 0x07, 0xb6, 0x8d, 0xd8, 0x5f, 0x2f, 0xc2, 0x23, 0x6d, 0x96, 0x3d, 0xdf, 0xeb, 0x8d, 0x31, - 0xd0, 0xf6, 0xfa, 0xd4, 0x38, 0x54, 0xe0, 0xc4, 0x46, 0xab, 0xd1, 0xd8, 0x63, 0x4f, 0xa0, 0x48, - 0x5d, 0x52, 0x08, 0x99, 0x52, 0x2a, 0x47, 0x4e, 0x2c, 0x66, 0xd0, 0xe0, 0xcc, 0x92, 0xf4, 0x8a, - 0x45, 0x4f, 0x92, 0x3d, 0xc5, 0x2a, 0x71, 0xc5, 0xc2, 0x3a, 0x12, 0x9b, 0xb4, 0xe8, 0x2a, 0x8c, - 0x3b, 0xbb, 0x8e, 0xcb, 0x03, 0xde, 0x4b, 0x06, 0xfc, 0x8e, 0xa5, 0x74, 0xd1, 0x33, 0x49, 0x02, - 0x9c, 0x2e, 0x83, 0x5e, 0x07, 0xe4, 0xaf, 0xb3, 0x87, 0x12, 0xf5, 0xab, 0xc4, 0x13, 0x56, 0x77, - 0x36, 0x76, 0xc5, 0x78, 0x4b, 0xb8, 0x91, 0xa2, 0xc0, 0x19, 0xa5, 0x12, 0x81, 0xc9, 0xfa, 0xf2, - 0x03, 0x93, 0xb5, 0xdf, 0x17, 0x3b, 0xa6, 0xde, 0xba, 0x02, 0xc3, 0x87, 0x74, 0xff, 0xb5, 0xff, - 0x8d, 0x05, 0x4a, 0x41, 0x6c, 0x46, 0xfd, 0x7d, 0x95, 0xf9, 0x27, 0x73, 0xd5, 0xb6, 0x16, 0x2d, - 0xe9, 0xa4, 0xe6, 0x9f, 0x1c, 0x23, 0xb1, 0x49, 0xcb, 0xe7, 0x90, 0xe6, 0x57, 0x6c, 0xdc, 0x0a, - 0x44, 0x68, 0x42, 0x45, 0x81, 0x3e, 0x0e, 0xfd, 0x75, 0x77, 0xd7, 0x0d, 0x85, 0x72, 0xec, 0xd0, - 0xc6, 0xb8, 0x78, 0xeb, 0x9c, 0xe7, 0x6c, 0xb0, 0xe4, 0x67, 0xff, 0x50, 0x21, 0xee, 0x93, 0x37, - 0x5a, 0x7e, 0xe4, 0x1c, 0xc3, 0x49, 0x7e, 0xd5, 0x38, 0xc9, 0x9f, 0x68, 0x17, 0x9f, 0x91, 0x35, - 0x29, 0xf7, 0x04, 0xbf, 0x91, 0x38, 0xc1, 0x9f, 0xec, 0xcc, 0xaa, 0xfd, 0xc9, 0xfd, 0x0f, 0x2c, - 0x18, 0x37, 0xe8, 0x8f, 0xe1, 0x00, 0x59, 0x34, 0x0f, 0x90, 0x47, 0x3b, 0x7e, 0x43, 0xce, 0xc1, - 0xf1, 0xbd, 0xc5, 0x44, 0xdb, 0xd9, 0x81, 0xf1, 0x36, 0xf4, 0x6c, 0x39, 0x41, 0xbd, 0x5d, 0x3e, - 0x9a, 0x54, 0xa1, 0xe9, 0x6b, 0x4e, 0x20, 0x3c, 0x15, 0x9e, 0x95, 0xbd, 0x4e, 0x41, 0x1d, 0xbd, - 0x14, 0x58, 0x55, 0xe8, 0x65, 0xe8, 0x0b, 0x6b, 0x7e, 0x53, 0xbd, 0x99, 0xba, 0xc0, 0x3a, 0x9a, - 0x41, 0x0e, 0xf6, 0xcb, 0xc8, 0xac, 0x8e, 0x82, 0xb1, 0xa0, 0x47, 0x6f, 0xc2, 0x30, 0xfb, 0xa5, - 0xdc, 0x06, 0x8b, 0xf9, 0x1a, 
0x8c, 0xaa, 0x4e, 0xc8, 0x7d, 0x6a, 0x0d, 0x10, 0x36, 0x59, 0x4d, - 0x6d, 0x42, 0x49, 0x7d, 0xd6, 0x43, 0xb5, 0x76, 0xff, 0xab, 0x22, 0x4c, 0x64, 0xcc, 0x39, 0x14, - 0x1a, 0x23, 0x71, 0xa5, 0xcb, 0xa9, 0xfa, 0x0e, 0xc7, 0x22, 0x64, 0x17, 0xa8, 0xba, 0x98, 0x5b, - 0x5d, 0x57, 0x7a, 0x33, 0x24, 0xc9, 0x4a, 0x29, 0xa8, 0x73, 0xa5, 0xb4, 0xb2, 0x63, 0xeb, 0x6a, - 0x5a, 0x91, 0x6a, 0xe9, 0x43, 0x1d, 0xd3, 0x5f, 0xef, 0x81, 0x13, 0x59, 0x21, 0x63, 0xd1, 0x67, - 0x13, 0xd9, 0x90, 0x5f, 0xec, 0x36, 0xd8, 0x2c, 0x4f, 0x91, 0x2c, 0xc2, 0x40, 0x4e, 0x9b, 0xf9, - 0x91, 0x3b, 0x76, 0xb3, 0xa8, 0x93, 0x05, 0xa0, 0x09, 0x78, 0x16, 0x6b, 0xb9, 0x7d, 0x7c, 0xa0, - 0xeb, 0x06, 0x88, 0xf4, 0xd7, 0x61, 0xc2, 0x25, 0x49, 0x82, 0x3b, 0xbb, 0x24, 0xc9, 0x9a, 0xd1, - 0x12, 0xf4, 0xd5, 0xb8, 0xaf, 0x4b, 0xb1, 0xf3, 0x16, 0xc6, 0x1d, 0x5d, 0xd4, 0x06, 0x2c, 0x1c, - 0x5c, 0x04, 0x83, 0x29, 0x17, 0x06, 0xb5, 0x8e, 0x79, 0xa8, 0x93, 0x67, 0x9b, 0x1e, 0x7c, 0x5a, - 0x17, 0x3c, 0xd4, 0x09, 0xf4, 0xa3, 0x16, 0x24, 0x1e, 0xbc, 0x28, 0xa5, 0x9c, 0x95, 0xab, 0x94, - 0xbb, 0x00, 0x3d, 0x81, 0xdf, 0x20, 0xc9, 0x0c, 0xc4, 0xd8, 0x6f, 0x10, 0xcc, 0x30, 0x94, 0x22, - 0x8a, 0x55, 0x2d, 0x43, 0xfa, 0x35, 0x52, 0x5c, 0x10, 0x1f, 0x83, 0xde, 0x06, 0xd9, 0x25, 0x8d, - 0x64, 0xa2, 0xb8, 0x65, 0x0a, 0xc4, 0x1c, 0x67, 0xff, 0x62, 0x0f, 0x9c, 0x6b, 0x1b, 0x0d, 0x8a, - 0x5e, 0xc6, 0x36, 0x9d, 0x88, 0xdc, 0x71, 0xf6, 0x92, 0x19, 0x9d, 0xae, 0x72, 0x30, 0x96, 0x78, - 0xf6, 0xfc, 0x93, 0x27, 0x66, 0x48, 0xa8, 0x30, 0x45, 0x3e, 0x06, 0x81, 0x35, 0x55, 0x62, 0xc5, - 0xa3, 0x50, 0x89, 0x3d, 0x0f, 0x10, 0x86, 0x0d, 0xee, 0x16, 0x58, 0x17, 0xef, 0x4a, 0xe3, 0x04, - 0x1e, 0xd5, 0x65, 0x81, 0xc1, 0x1a, 0x15, 0x9a, 0x87, 0xb1, 0x66, 0xe0, 0x47, 0x5c, 0x23, 0x3c, - 0xcf, 0x3d, 0x67, 0x7b, 0xcd, 0x40, 0x3c, 0x95, 0x04, 0x1e, 0xa7, 0x4a, 0xa0, 0x97, 0x60, 0x50, - 0x04, 0xe7, 0xa9, 0xf8, 0x7e, 0x43, 0x28, 0xa1, 0x94, 0x33, 0x69, 0x35, 0x46, 0x61, 0x9d, 0x4e, - 0x2b, 0xc6, 0xd4, 0xcc, 0xfd, 0x99, 0xc5, 0xb8, 0xaa, 0x59, 0xa3, 0x4b, 0x44, 0xa2, 0x1e, 0xe8, - 0x2a, 0x12, 0x75, 0xac, 0x96, 0x2b, 0x75, 0x6d, 0xf5, 0x84, 0x8e, 0x8a, 0xac, 0xaf, 0xf4, 0xc0, - 0x84, 0x98, 0x38, 0x0f, 0x7b, 0xba, 0xdc, 0x4c, 0x4f, 0x97, 0xa3, 0x50, 0xdc, 0xbd, 0x37, 0x67, - 0x8e, 0x7b, 0xce, 0xfc, 0xb0, 0x05, 0xa6, 0xa4, 0x86, 0xfe, 0xb7, 0xdc, 0x94, 0x78, 0x2f, 0xe5, - 0x4a, 0x7e, 0x71, 0x94, 0xdf, 0x77, 0x96, 0x1c, 0xcf, 0xfe, 0xd7, 0x16, 0x3c, 0xda, 0x91, 0x23, - 0x5a, 0x80, 0x12, 0x13, 0x27, 0xb5, 0x8b, 0xde, 0x93, 0xca, 0xb3, 0x5e, 0x22, 0x72, 0xa4, 0xdb, - 0xb8, 0x24, 0x5a, 0x48, 0xe5, 0x1e, 0x7c, 0x2a, 0x23, 0xf7, 0xe0, 0x49, 0xa3, 0x7b, 0x1e, 0x30, - 0xf9, 0xe0, 0x0f, 0xd2, 0x13, 0xc7, 0x78, 0xd5, 0x86, 0x3e, 0x60, 0x28, 0x1d, 0xed, 0x84, 0xd2, - 0x11, 0x99, 0xd4, 0xda, 0x19, 0xf2, 0x51, 0x18, 0x63, 0x51, 0xfb, 0xd8, 0x3b, 0x0f, 0xf1, 0xde, - 0xae, 0x10, 0xfb, 0x72, 0x2f, 0x27, 0x70, 0x38, 0x45, 0x6d, 0xff, 0x69, 0x11, 0xfa, 0xf8, 0xf2, - 0x3b, 0x86, 0xeb, 0xe5, 0x33, 0x50, 0x72, 0x77, 0x76, 0x5a, 0x3c, 0x9d, 0x5c, 0x6f, 0xec, 0x19, - 0xbc, 0x24, 0x81, 0x38, 0xc6, 0xa3, 0x45, 0xa1, 0xef, 0x6e, 0x13, 0x18, 0x98, 0x37, 0x7c, 0x7a, - 0xde, 0x89, 0x1c, 0x2e, 0x2b, 0xa9, 0x73, 0x36, 0xd6, 0x8c, 0xa3, 0x4f, 0x01, 0x84, 0x51, 0xe0, - 0x7a, 0x9b, 0x14, 0x26, 0x62, 0xab, 0x3f, 0xdd, 0x86, 0x5b, 0x55, 0x11, 0x73, 0x9e, 0xf1, 0x9e, - 0xa3, 0x10, 0x58, 0xe3, 0x88, 0xa6, 0x8d, 0x93, 0x7e, 0x2a, 0x31, 0x76, 0xc0, 0xb9, 0xc6, 0x63, - 0x36, 0xf5, 0x41, 0x28, 0x29, 0xe6, 0x9d, 0xb4, 0x5f, 0x43, 0xba, 0x58, 0xf4, 0x11, 0x18, 0x4d, - 0xb4, 0xed, 0x50, 0xca, 0xb3, 0x5f, 0xb2, 0x60, 0x94, 
0x37, 0x66, 0xc1, 0xdb, 0x15, 0xa7, 0xc1, - 0x3d, 0x38, 0xd1, 0xc8, 0xd8, 0x95, 0xc5, 0xf0, 0x77, 0xbf, 0x8b, 0x2b, 0x65, 0x59, 0x16, 0x16, - 0x67, 0xd6, 0x81, 0x2e, 0xd1, 0x15, 0x47, 0x77, 0x5d, 0xa7, 0x21, 0xe2, 0x1b, 0x0c, 0xf1, 0xd5, - 0xc6, 0x61, 0x58, 0x61, 0xed, 0x3f, 0xb0, 0x60, 0x9c, 0xb7, 0xfc, 0x3a, 0xd9, 0x53, 0x7b, 0xd3, - 0x37, 0xb3, 0xed, 0x22, 0x91, 0x69, 0x21, 0x27, 0x91, 0xa9, 0xfe, 0x69, 0xc5, 0xb6, 0x9f, 0xf6, - 0x65, 0x0b, 0xc4, 0x0c, 0x39, 0x06, 0x7d, 0xc6, 0x77, 0x98, 0xfa, 0x8c, 0xa9, 0xfc, 0x45, 0x90, - 0xa3, 0xc8, 0xf8, 0x2b, 0x0b, 0xc6, 0x38, 0x41, 0x6c, 0xab, 0xff, 0xa6, 0x8e, 0xc3, 0xac, 0xf9, - 0x45, 0x99, 0xce, 0x97, 0xd7, 0xc9, 0xde, 0x9a, 0x5f, 0x71, 0xa2, 0xad, 0xec, 0x8f, 0x32, 0x06, - 0xab, 0xa7, 0xed, 0x60, 0xd5, 0xe5, 0x02, 0x32, 0xf2, 0x7c, 0x75, 0x08, 0x10, 0x70, 0xd8, 0x3c, - 0x5f, 0xf6, 0x9f, 0x59, 0x80, 0x78, 0x35, 0x86, 0xe0, 0x46, 0xc5, 0x21, 0x06, 0xd5, 0x0e, 0xba, - 0x78, 0x6b, 0x52, 0x18, 0xac, 0x51, 0x1d, 0x49, 0xf7, 0x24, 0x1c, 0x2e, 0x8a, 0x9d, 0x1d, 0x2e, - 0x0e, 0xd1, 0xa3, 0xff, 0xac, 0x0f, 0x92, 0x2f, 0xfb, 0xd0, 0x2d, 0x18, 0xaa, 0x39, 0x4d, 0x67, - 0xdd, 0x6d, 0xb8, 0x91, 0x4b, 0xc2, 0x76, 0xde, 0x58, 0x73, 0x1a, 0x9d, 0x30, 0x91, 0x6b, 0x10, - 0x6c, 0xf0, 0x41, 0xd3, 0x00, 0xcd, 0xc0, 0xdd, 0x75, 0x1b, 0x64, 0x93, 0xa9, 0x5d, 0x58, 0x44, - 0x15, 0xee, 0x1a, 0x26, 0xa1, 0x58, 0xa3, 0xc8, 0x08, 0xa3, 0x50, 0x7c, 0xc8, 0x61, 0x14, 0xe0, - 0xd8, 0xc2, 0x28, 0xf4, 0x1c, 0x2a, 0x8c, 0xc2, 0xc0, 0xa1, 0xc3, 0x28, 0xf4, 0x76, 0x15, 0x46, - 0x01, 0xc3, 0x29, 0x29, 0x7b, 0xd2, 0xff, 0x8b, 0x6e, 0x83, 0x88, 0x0b, 0x07, 0x0f, 0x03, 0x33, - 0x75, 0x7f, 0xbf, 0x7c, 0x0a, 0x67, 0x52, 0xe0, 0x9c, 0x92, 0xe8, 0x63, 0x30, 0xe9, 0x34, 0x1a, - 0xfe, 0x1d, 0x35, 0xa8, 0x0b, 0x61, 0xcd, 0x69, 0x70, 0x13, 0x48, 0x3f, 0xe3, 0x7a, 0xf6, 0xfe, - 0x7e, 0x79, 0x72, 0x26, 0x87, 0x06, 0xe7, 0x96, 0x46, 0x1f, 0x86, 0x52, 0x33, 0xf0, 0x6b, 0x2b, - 0xda, 0xf3, 0xe3, 0xf3, 0xb4, 0x03, 0x2b, 0x12, 0x78, 0xb0, 0x5f, 0x1e, 0x56, 0x7f, 0xd8, 0x81, - 0x1f, 0x17, 0xc8, 0x88, 0x8b, 0x30, 0x78, 0xa4, 0x71, 0x11, 0xb6, 0x61, 0xa2, 0x4a, 0x02, 0xd7, - 0x69, 0xb8, 0xf7, 0xa8, 0xbc, 0x2c, 0xf7, 0xa7, 0x35, 0x28, 0x05, 0x89, 0x1d, 0xb9, 0xab, 0x60, - 0xbd, 0x5a, 0xc2, 0x25, 0xb9, 0x03, 0xc7, 0x8c, 0xec, 0xff, 0x66, 0x41, 0xbf, 0x78, 0xc9, 0x77, - 0x0c, 0x52, 0xe3, 0x8c, 0x61, 0x94, 0x28, 0x67, 0x77, 0x18, 0x6b, 0x4c, 0xae, 0x39, 0x62, 0x29, - 0x61, 0x8e, 0x78, 0xb4, 0x1d, 0x93, 0xf6, 0x86, 0x88, 0xff, 0xaf, 0x48, 0xa5, 0x77, 0xe3, 0x4d, - 0xf9, 0xc3, 0xef, 0x82, 0x55, 0xe8, 0x0f, 0xc5, 0x9b, 0xe6, 0x42, 0xfe, 0x6b, 0x90, 0xe4, 0x20, - 0xc6, 0x5e, 0x74, 0xe2, 0x15, 0xb3, 0x64, 0x92, 0xf9, 0x58, 0xba, 0xf8, 0x10, 0x1f, 0x4b, 0x77, - 0x7a, 0x75, 0xdf, 0x73, 0x14, 0xaf, 0xee, 0xed, 0xaf, 0xb1, 0x93, 0x53, 0x87, 0x1f, 0x83, 0x50, - 0x75, 0xd5, 0x3c, 0x63, 0xed, 0x36, 0x33, 0x4b, 0x34, 0x2a, 0x47, 0xb8, 0xfa, 0x05, 0x0b, 0xce, - 0x65, 0x7c, 0x95, 0x26, 0x69, 0x3d, 0x0b, 0x03, 0x4e, 0xab, 0xee, 0xaa, 0xb5, 0xac, 0x99, 0x26, - 0x67, 0x04, 0x1c, 0x2b, 0x0a, 0x34, 0x07, 0xe3, 0xe4, 0x6e, 0xd3, 0xe5, 0x86, 0x5c, 0xdd, 0xf9, - 0xb8, 0xc8, 0x9f, 0x7f, 0x2e, 0x24, 0x91, 0x38, 0x4d, 0xaf, 0x02, 0x44, 0x15, 0x73, 0x03, 0x44, - 0xfd, 0xbc, 0x05, 0x83, 0xea, 0x55, 0xef, 0x43, 0xef, 0xed, 0x8f, 0x9a, 0xbd, 0xfd, 0x48, 0x9b, - 0xde, 0xce, 0xe9, 0xe6, 0xdf, 0x2b, 0xa8, 0xf6, 0x56, 0xfc, 0x20, 0xea, 0x42, 0x82, 0x7b, 0xf0, - 0x87, 0x13, 0x57, 0x60, 0xd0, 0x69, 0x36, 0x25, 0x42, 0x7a, 0xc0, 0xb1, 0xd0, 0xeb, 0x31, 0x18, - 0xeb, 0x34, 0xea, 0x1d, 0x47, 0x31, 0xf7, 0x1d, 0x47, 0x1d, 0x20, 0x72, 0x82, 
0x4d, 0x12, 0x51, - 0x98, 0x70, 0xd8, 0xcd, 0xdf, 0x6f, 0x5a, 0x91, 0xdb, 0x98, 0x76, 0xbd, 0x28, 0x8c, 0x82, 0xe9, - 0x25, 0x2f, 0xba, 0x11, 0xf0, 0x2b, 0xa4, 0x16, 0x62, 0x4d, 0xf1, 0xc2, 0x1a, 0x5f, 0x19, 0xc1, - 0x82, 0xd5, 0xd1, 0x6b, 0xba, 0x52, 0xac, 0x0a, 0x38, 0x56, 0x14, 0xf6, 0x07, 0xd9, 0xe9, 0xc3, - 0xfa, 0xf4, 0x70, 0xe1, 0xc5, 0x7e, 0x72, 0x48, 0x8d, 0x06, 0x33, 0x8a, 0xce, 0xeb, 0x41, 0xcc, - 0xda, 0x6f, 0xf6, 0xb4, 0x62, 0xfd, 0x45, 0x64, 0x1c, 0xe9, 0x0c, 0x7d, 0x22, 0xe5, 0x1e, 0xf3, - 0x5c, 0x87, 0x53, 0xe3, 0x10, 0x0e, 0x31, 0x2c, 0x0f, 0x13, 0xcb, 0x52, 0xb3, 0x54, 0x11, 0xeb, - 0x42, 0xcb, 0xc3, 0x24, 0x10, 0x38, 0xa6, 0xa1, 0xc2, 0x94, 0xfa, 0x13, 0x4e, 0xa2, 0x38, 0x16, - 0xb0, 0xa2, 0x0e, 0xb1, 0x46, 0x81, 0x2e, 0x0b, 0x85, 0x02, 0xb7, 0x0b, 0x3c, 0x92, 0x50, 0x28, - 0xc8, 0xee, 0xd2, 0xb4, 0x40, 0x57, 0x60, 0x90, 0xdc, 0x8d, 0x48, 0xe0, 0x39, 0x0d, 0x5a, 0x43, - 0x6f, 0x1c, 0x3f, 0x73, 0x21, 0x06, 0x63, 0x9d, 0x06, 0xad, 0xc1, 0x68, 0xc8, 0xf5, 0x6c, 0x2a, - 0x48, 0x3c, 0xd7, 0x57, 0x3e, 0xad, 0xde, 0x53, 0x9b, 0xe8, 0x03, 0x06, 0xe2, 0xbb, 0x93, 0x8c, - 0x32, 0x91, 0x64, 0x81, 0x5e, 0x83, 0x91, 0x86, 0xef, 0xd4, 0x67, 0x9d, 0x86, 0xe3, 0xd5, 0x58, - 0xff, 0x0c, 0x98, 0x89, 0xa8, 0x97, 0x0d, 0x2c, 0x4e, 0x50, 0x53, 0xe1, 0x4d, 0x87, 0x88, 0x30, - 0x6d, 0x8e, 0xb7, 0x49, 0x42, 0x91, 0x0f, 0x9e, 0x09, 0x6f, 0xcb, 0x39, 0x34, 0x38, 0xb7, 0x34, - 0x7a, 0x19, 0x86, 0xe4, 0xe7, 0x6b, 0x41, 0x59, 0xe2, 0x27, 0x31, 0x1a, 0x0e, 0x1b, 0x94, 0x28, - 0x84, 0x93, 0xf2, 0xff, 0x5a, 0xe0, 0x6c, 0x6c, 0xb8, 0x35, 0x11, 0xa9, 0x80, 0x3f, 0x1f, 0xfe, - 0x88, 0x7c, 0xab, 0xb8, 0x90, 0x45, 0x74, 0xb0, 0x5f, 0x3e, 0x2b, 0x7a, 0x2d, 0x13, 0x8f, 0xb3, - 0x79, 0xa3, 0x15, 0x98, 0xd8, 0x22, 0x4e, 0x23, 0xda, 0x9a, 0xdb, 0x22, 0xb5, 0x6d, 0xb9, 0xe0, - 0x58, 0x98, 0x17, 0xed, 0xe9, 0xc8, 0xb5, 0x34, 0x09, 0xce, 0x2a, 0x87, 0xde, 0x82, 0xc9, 0x66, - 0x6b, 0xbd, 0xe1, 0x86, 0x5b, 0xab, 0x7e, 0xc4, 0x9c, 0x90, 0x66, 0xea, 0xf5, 0x80, 0x84, 0xfc, - 0x75, 0x29, 0x3b, 0x7a, 0x65, 0x20, 0x9d, 0x4a, 0x0e, 0x1d, 0xce, 0xe5, 0x80, 0xee, 0xc1, 0xc9, - 0xc4, 0x44, 0x10, 0x11, 0x31, 0x46, 0xf2, 0x53, 0xc4, 0x54, 0xb3, 0x0a, 0x88, 0xe0, 0x32, 0x59, - 0x28, 0x9c, 0x5d, 0x05, 0x7a, 0x05, 0xc0, 0x6d, 0x2e, 0x3a, 0x3b, 0x6e, 0x83, 0x5e, 0x15, 0x27, - 0xd8, 0x1c, 0xa1, 0xd7, 0x06, 0x58, 0xaa, 0x48, 0x28, 0xdd, 0x9b, 0xc5, 0xbf, 0x3d, 0xac, 0x51, - 0xa3, 0x65, 0x18, 0x11, 0xff, 0xf6, 0xc4, 0x90, 0xf2, 0xc0, 0x2c, 0x8f, 0xb3, 0xa8, 0x5a, 0x15, - 0x1d, 0x73, 0x90, 0x82, 0xe0, 0x44, 0x59, 0xb4, 0x09, 0xe7, 0x64, 0xa2, 0x3f, 0x7d, 0x7e, 0xca, - 0x31, 0x08, 0x59, 0x5e, 0x96, 0x01, 0xfe, 0x2a, 0x65, 0xa6, 0x1d, 0x21, 0x6e, 0xcf, 0x87, 0x9e, - 0xeb, 0xfa, 0x34, 0xe7, 0x6f, 0x8e, 0x4f, 0xc6, 0x11, 0x07, 0x97, 0x93, 0x48, 0x9c, 0xa6, 0x47, - 0x3e, 0x9c, 0x74, 0xbd, 0xac, 0x59, 0x7d, 0x8a, 0x31, 0xfa, 0x10, 0x7f, 0x6e, 0xdd, 0x7e, 0x46, - 0x67, 0xe2, 0x71, 0x36, 0xdf, 0x77, 0xe6, 0xf7, 0xf7, 0xfb, 0x16, 0x2d, 0xad, 0x49, 0xe7, 0xe8, - 0xd3, 0x30, 0xa4, 0x7f, 0x94, 0x90, 0x34, 0x2e, 0x66, 0x0b, 0xaf, 0xda, 0x9e, 0xc0, 0x65, 0x7b, - 0xb5, 0xee, 0x75, 0x1c, 0x36, 0x38, 0xa2, 0x5a, 0x46, 0x6c, 0x83, 0xcb, 0xdd, 0x49, 0x32, 0xdd, - 0xbb, 0xbd, 0x11, 0xc8, 0x9e, 0xee, 0x68, 0x19, 0x06, 0x6a, 0x0d, 0x97, 0x78, 0xd1, 0x52, 0xa5, - 0x5d, 0xf4, 0xc6, 0x39, 0x41, 0x23, 0xd6, 0x8f, 0x48, 0xb1, 0xc2, 0x61, 0x58, 0x71, 0xb0, 0x7f, - 0xb3, 0x00, 0xe5, 0x0e, 0xf9, 0x7a, 0x12, 0x66, 0x28, 0xab, 0x2b, 0x33, 0xd4, 0x0c, 0x8c, 0xc6, - 0xff, 0x74, 0x0d, 0x97, 0xf2, 0x64, 0xbd, 0x65, 0xa2, 0x71, 0x92, 0xbe, 0xeb, 0x47, 0x09, 0xba, - 0x25, 
0xab, 0xa7, 0xe3, 0xb3, 0x1a, 0xc3, 0x82, 0xdd, 0xdb, 0xfd, 0xb5, 0x37, 0xd7, 0x1a, 0x69, - 0x7f, 0xad, 0x00, 0x27, 0x55, 0x17, 0x7e, 0xfb, 0x76, 0xdc, 0xcd, 0x74, 0xc7, 0x1d, 0x81, 0x2d, - 0xd7, 0xbe, 0x01, 0x7d, 0x3c, 0x1c, 0x65, 0x17, 0xe2, 0xf6, 0x63, 0x66, 0x94, 0x6c, 0x25, 0xe1, - 0x19, 0x91, 0xb2, 0xbf, 0xdf, 0x82, 0xd1, 0xc4, 0xeb, 0x36, 0x84, 0xb5, 0x27, 0xd0, 0x0f, 0x22, - 0x12, 0x67, 0x09, 0xdb, 0x17, 0xa0, 0x67, 0xcb, 0x0f, 0xa3, 0xa4, 0xa3, 0xc7, 0x35, 0x3f, 0x8c, - 0x30, 0xc3, 0xd8, 0x7f, 0x68, 0x41, 0xef, 0x9a, 0xe3, 0x7a, 0x91, 0x34, 0x0a, 0x58, 0x39, 0x46, - 0x81, 0x6e, 0xbe, 0x0b, 0xbd, 0x04, 0x7d, 0x64, 0x63, 0x83, 0xd4, 0x22, 0x31, 0xaa, 0x32, 0x14, - 0x42, 0xdf, 0x02, 0x83, 0x52, 0xf9, 0x8f, 0x55, 0xc6, 0xff, 0x62, 0x41, 0x8c, 0x6e, 0x43, 0x29, - 0x72, 0x77, 0xc8, 0x4c, 0xbd, 0x2e, 0x4c, 0xe5, 0x0f, 0x10, 0xbf, 0x63, 0x4d, 0x32, 0xc0, 0x31, - 0x2f, 0xfb, 0x8b, 0x05, 0x80, 0x38, 0x8e, 0x57, 0xa7, 0x4f, 0x9c, 0x4d, 0x19, 0x51, 0x2f, 0x66, - 0x18, 0x51, 0x51, 0xcc, 0x30, 0xc3, 0x82, 0xaa, 0xba, 0xa9, 0xd8, 0x55, 0x37, 0xf5, 0x1c, 0xa6, - 0x9b, 0xe6, 0x60, 0x3c, 0x8e, 0x43, 0x66, 0x86, 0x61, 0x64, 0x47, 0xe7, 0x5a, 0x12, 0x89, 0xd3, - 0xf4, 0x36, 0x81, 0x0b, 0x2a, 0x1c, 0x93, 0x38, 0xd1, 0x98, 0x1f, 0xb8, 0x6e, 0x94, 0xee, 0xd0, - 0x4f, 0xb1, 0x95, 0xb8, 0x90, 0x6b, 0x25, 0xfe, 0x09, 0x0b, 0x4e, 0x24, 0xeb, 0x61, 0x8f, 0xa6, - 0xbf, 0x60, 0xc1, 0x49, 0x66, 0x2b, 0x67, 0xb5, 0xa6, 0x2d, 0xf3, 0x2f, 0xb6, 0x0d, 0x31, 0x95, - 0xd3, 0xe2, 0x38, 0xe6, 0xc6, 0x4a, 0x16, 0x6b, 0x9c, 0x5d, 0xa3, 0xfd, 0x5f, 0x7b, 0x60, 0x32, - 0x2f, 0x36, 0x15, 0x7b, 0x26, 0xe2, 0xdc, 0xad, 0x6e, 0x93, 0x3b, 0xc2, 0x19, 0x3f, 0x7e, 0x26, - 0xc2, 0xc1, 0x58, 0xe2, 0x93, 0xe9, 0x4f, 0x0a, 0x5d, 0xa6, 0x3f, 0xd9, 0x82, 0xf1, 0x3b, 0x5b, - 0xc4, 0xbb, 0xe9, 0x85, 0x4e, 0xe4, 0x86, 0x1b, 0x2e, 0xb3, 0x2b, 0xf3, 0x79, 0x23, 0x73, 0x50, - 0x8f, 0xdf, 0x4e, 0x12, 0x1c, 0xec, 0x97, 0xcf, 0x19, 0x80, 0xb8, 0xc9, 0x7c, 0x23, 0xc1, 0x69, - 0xa6, 0xe9, 0xec, 0x31, 0x3d, 0x0f, 0x39, 0x7b, 0xcc, 0x8e, 0x2b, 0xbc, 0x51, 0xe4, 0x1b, 0x00, - 0x76, 0x63, 0x5c, 0x51, 0x50, 0xac, 0x51, 0xa0, 0x4f, 0x02, 0xd2, 0x33, 0x74, 0x19, 0xa1, 0x41, - 0x9f, 0xbb, 0xbf, 0x5f, 0x46, 0xab, 0x29, 0xec, 0xc1, 0x7e, 0x79, 0x82, 0x42, 0x97, 0x3c, 0x7a, - 0xf3, 0x8c, 0xe3, 0xa9, 0x65, 0x30, 0x42, 0xb7, 0x61, 0x8c, 0x42, 0xd9, 0x8a, 0x92, 0x71, 0x47, - 0xf9, 0x6d, 0xf1, 0x99, 0xfb, 0xfb, 0xe5, 0xb1, 0xd5, 0x04, 0x2e, 0x8f, 0x75, 0x8a, 0x09, 0x7a, - 0x05, 0x46, 0xe2, 0x79, 0x75, 0x9d, 0xec, 0xf1, 0x00, 0x3d, 0x25, 0xae, 0xf0, 0x5e, 0x31, 0x30, - 0x38, 0x41, 0x69, 0x7f, 0xc1, 0x82, 0x33, 0xb9, 0x19, 0xf1, 0xd1, 0x25, 0x18, 0x70, 0x9a, 0x2e, - 0x37, 0x5f, 0x88, 0xa3, 0x86, 0xa9, 0xc9, 0x2a, 0x4b, 0xdc, 0x78, 0xa1, 0xb0, 0x74, 0x87, 0xdf, - 0x76, 0xbd, 0x7a, 0x72, 0x87, 0xbf, 0xee, 0x7a, 0x75, 0xcc, 0x30, 0xea, 0xc8, 0x2a, 0xe6, 0x3e, - 0x45, 0xf8, 0x0a, 0x5d, 0xab, 0x19, 0xb9, 0xf3, 0x8f, 0xb7, 0x19, 0xe8, 0x19, 0xdd, 0xd4, 0x28, - 0xbc, 0x0a, 0x73, 0xcd, 0x8c, 0xdf, 0x67, 0x81, 0x78, 0xba, 0xdc, 0xc5, 0x99, 0xfc, 0x26, 0x0c, - 0xed, 0xa6, 0xb3, 0x17, 0x5e, 0xc8, 0x7f, 0xcb, 0x2d, 0x22, 0xae, 0x2b, 0x41, 0xdb, 0xc8, 0x54, - 0x68, 0xf0, 0xb2, 0xeb, 0x20, 0xb0, 0xf3, 0x84, 0x19, 0x14, 0x3a, 0xb7, 0xe6, 0x79, 0x80, 0x3a, - 0xa3, 0x65, 0x29, 0x8d, 0x0b, 0xa6, 0xc4, 0x35, 0xaf, 0x30, 0x58, 0xa3, 0xb2, 0xff, 0x79, 0x01, - 0x06, 0x65, 0xb6, 0xbc, 0x96, 0xd7, 0x8d, 0xda, 0xef, 0x50, 0xe9, 0xb3, 0xd1, 0x65, 0x28, 0x31, - 0xbd, 0x74, 0x25, 0xd6, 0x96, 0x2a, 0xad, 0xd0, 0x8a, 0x44, 0xe0, 0x98, 0x86, 0xee, 0x8e, 0x61, - 0x6b, 0x9d, 0x91, 0x27, 0x1e, 
0xda, 0x56, 0x39, 0x18, 0x4b, 0x3c, 0xfa, 0x18, 0x8c, 0xf1, 0x72, - 0x81, 0xdf, 0x74, 0x36, 0xb9, 0x2d, 0xab, 0x57, 0x45, 0x2f, 0x19, 0x5b, 0x49, 0xe0, 0x0e, 0xf6, - 0xcb, 0x27, 0x92, 0x30, 0x66, 0xa4, 0x4d, 0x71, 0x61, 0x2e, 0x6b, 0xbc, 0x12, 0xba, 0xab, 0xa7, - 0x3c, 0xdd, 0x62, 0x14, 0xd6, 0xe9, 0xec, 0x4f, 0x03, 0x4a, 0xe7, 0x0d, 0x44, 0xaf, 0x73, 0x97, - 0x67, 0x37, 0x20, 0xf5, 0x76, 0x46, 0x5b, 0x3d, 0x46, 0x87, 0x7c, 0x23, 0xc7, 0x4b, 0x61, 0x55, - 0xde, 0xfe, 0x3f, 0x8b, 0x30, 0x96, 0x8c, 0x0a, 0x80, 0xae, 0x41, 0x1f, 0x17, 0x29, 0x05, 0xfb, - 0x36, 0x3e, 0x41, 0x5a, 0x2c, 0x01, 0x76, 0xb8, 0x0a, 0xa9, 0x54, 0x94, 0x47, 0x6f, 0xc1, 0x60, - 0xdd, 0xbf, 0xe3, 0xdd, 0x71, 0x82, 0xfa, 0x4c, 0x65, 0x49, 0x4c, 0xe7, 0x4c, 0x45, 0xc5, 0x7c, - 0x4c, 0xa6, 0xc7, 0x27, 0x60, 0xf6, 0xef, 0x18, 0x85, 0x75, 0x76, 0x68, 0x8d, 0x25, 0xfa, 0xd8, - 0x70, 0x37, 0x57, 0x9c, 0x66, 0xbb, 0xf7, 0x2f, 0x73, 0x92, 0x48, 0xe3, 0x3c, 0x2c, 0xb2, 0x81, - 0x70, 0x04, 0x8e, 0x19, 0xa1, 0xcf, 0xc2, 0x44, 0x98, 0x63, 0x3a, 0xc9, 0x4b, 0x23, 0xdb, 0xce, - 0x9a, 0x30, 0x7b, 0xfa, 0xfe, 0x7e, 0x79, 0x22, 0xcb, 0xc8, 0x92, 0x55, 0x8d, 0xfd, 0xa5, 0x13, - 0x60, 0x2c, 0x62, 0x23, 0xab, 0xb8, 0x75, 0x44, 0x59, 0xc5, 0x31, 0x0c, 0x90, 0x9d, 0x66, 0xb4, - 0x37, 0xef, 0x06, 0x62, 0x4c, 0x32, 0x79, 0x2e, 0x08, 0x9a, 0x34, 0x4f, 0x89, 0xc1, 0x8a, 0x4f, - 0x76, 0xea, 0xf7, 0xe2, 0x37, 0x31, 0xf5, 0x7b, 0xcf, 0x31, 0xa6, 0x7e, 0x5f, 0x85, 0xfe, 0x4d, - 0x37, 0xc2, 0xa4, 0xe9, 0x8b, 0xcb, 0x5c, 0xe6, 0x3c, 0xbc, 0xca, 0x49, 0xd2, 0x49, 0x86, 0x05, - 0x02, 0x4b, 0x26, 0xe8, 0x75, 0xb5, 0x02, 0xfb, 0xf2, 0x15, 0x2e, 0x69, 0xe7, 0x95, 0xcc, 0x35, - 0x28, 0x12, 0xbc, 0xf7, 0x3f, 0x68, 0x82, 0xf7, 0x45, 0x99, 0x96, 0x7d, 0x20, 0xff, 0xb1, 0x1a, - 0xcb, 0xba, 0xde, 0x21, 0x19, 0xfb, 0x2d, 0x3d, 0x95, 0x7d, 0x29, 0x7f, 0x27, 0x50, 0x59, 0xea, - 0xbb, 0x4c, 0x60, 0xff, 0x7d, 0x16, 0x9c, 0x4c, 0xa6, 0x9a, 0x65, 0x6f, 0x2a, 0x84, 0x9f, 0xc7, - 0x4b, 0xdd, 0xe4, 0xfe, 0x65, 0x05, 0x8c, 0x0a, 0x99, 0x8e, 0x34, 0x93, 0x0c, 0x67, 0x57, 0x47, - 0x3b, 0x3a, 0x58, 0xaf, 0x0b, 0x7f, 0x83, 0xc7, 0x72, 0x32, 0xe1, 0xb7, 0xc9, 0x7f, 0xbf, 0x96, - 0x91, 0x75, 0xfd, 0xf1, 0xbc, 0xac, 0xeb, 0x5d, 0xe7, 0x5a, 0x7f, 0x5d, 0xe5, 0xc0, 0x1f, 0xce, - 0x9f, 0x4a, 0x3c, 0xc3, 0x7d, 0xc7, 0xcc, 0xf7, 0xaf, 0xab, 0xcc, 0xf7, 0x6d, 0x22, 0x8b, 0xf3, - 0xbc, 0xf6, 0x1d, 0xf3, 0xdd, 0x6b, 0x39, 0xeb, 0x47, 0x8f, 0x26, 0x67, 0xbd, 0x71, 0xd4, 0xf0, - 0xb4, 0xe9, 0xcf, 0x74, 0x38, 0x6a, 0x0c, 0xbe, 0xed, 0x0f, 0x1b, 0x9e, 0x9f, 0x7f, 0xfc, 0x81, - 0xf2, 0xf3, 0xdf, 0xd2, 0xf3, 0xdd, 0xa3, 0x0e, 0x09, 0xdd, 0x29, 0x51, 0x97, 0x59, 0xee, 0x6f, - 0xe9, 0x07, 0xe0, 0x44, 0x3e, 0x5f, 0x75, 0xce, 0xa5, 0xf9, 0x66, 0x1e, 0x81, 0xa9, 0xec, 0xf9, - 0x27, 0x8e, 0x27, 0x7b, 0xfe, 0xc9, 0x23, 0xcf, 0x9e, 0x7f, 0xea, 0x18, 0xb2, 0xe7, 0x9f, 0x3e, - 0xc6, 0xec, 0xf9, 0xb7, 0x98, 0x73, 0x14, 0x0f, 0x00, 0x25, 0x22, 0xa1, 0x3f, 0x95, 0x13, 0x3f, - 0x2d, 0x1d, 0x25, 0x8a, 0x7f, 0x9c, 0x42, 0xe1, 0x98, 0x55, 0x46, 0x56, 0xfe, 0xc9, 0x87, 0x90, - 0x95, 0x7f, 0x35, 0xce, 0xca, 0x7f, 0x26, 0x7f, 0xa8, 0x33, 0x9e, 0xd3, 0xe4, 0xe4, 0xe2, 0xbf, - 0xa5, 0xe7, 0xd0, 0x7f, 0xa4, 0x8d, 0x15, 0x2c, 0x4b, 0xa1, 0xdc, 0x26, 0x73, 0xfe, 0x6b, 0x3c, - 0x73, 0xfe, 0xd9, 0xfc, 0x9d, 0x3c, 0x79, 0xdc, 0x19, 0xf9, 0xf2, 0x69, 0xbb, 0x54, 0xf0, 0x57, - 0x16, 0xf3, 0x3d, 0xa7, 0x5d, 0x2a, 0x7a, 0x6c, 0xba, 0x5d, 0x0a, 0x85, 0x63, 0x56, 0xf6, 0x0f, - 0x14, 0xe0, 0x7c, 0xfb, 0xf5, 0x16, 0x6b, 0xc9, 0x2b, 0xb1, 0x43, 0x40, 0x42, 0x4b, 0xce, 0xef, - 0x6c, 0x31, 0x55, 0xd7, 0xf1, 0x20, 0xaf, 0xc2, 0xb8, 
0x7a, 0x87, 0xd3, 0x70, 0x6b, 0x7b, 0xab, - 0xf1, 0x35, 0x59, 0x45, 0x4e, 0xa8, 0x26, 0x09, 0x70, 0xba, 0x0c, 0x9a, 0x81, 0x51, 0x03, 0xb8, - 0x34, 0x2f, 0xee, 0x66, 0x71, 0x94, 0x71, 0x13, 0x8d, 0x93, 0xf4, 0xf6, 0xcf, 0x59, 0x70, 0x3a, - 0x27, 0xe5, 0x6b, 0xd7, 0xe1, 0x0e, 0x37, 0x60, 0xb4, 0x69, 0x16, 0xed, 0x10, 0xa1, 0xd5, 0x48, - 0x2c, 0xab, 0xda, 0x9a, 0x40, 0xe0, 0x24, 0x53, 0xfb, 0x67, 0x0a, 0x70, 0xae, 0xad, 0x63, 0x29, - 0xc2, 0x70, 0x6a, 0x73, 0x27, 0x74, 0xe6, 0x02, 0x52, 0x27, 0x5e, 0xe4, 0x3a, 0x8d, 0x6a, 0x93, - 0xd4, 0x34, 0x3b, 0x07, 0xf3, 0xd0, 0xbc, 0xba, 0x52, 0x9d, 0x49, 0x53, 0xe0, 0x9c, 0x92, 0x68, - 0x11, 0x50, 0x1a, 0x23, 0x46, 0x98, 0x65, 0x0f, 0x48, 0xf3, 0xc3, 0x19, 0x25, 0xd0, 0x07, 0x61, - 0x58, 0x39, 0xac, 0x6a, 0x23, 0xce, 0x36, 0x76, 0xac, 0x23, 0xb0, 0x49, 0x87, 0xae, 0xf0, 0xf4, - 0x13, 0x22, 0x51, 0x89, 0x30, 0x8a, 0x8c, 0xca, 0xdc, 0x12, 0x02, 0x8c, 0x75, 0x9a, 0xd9, 0x97, - 0x7f, 0xeb, 0x1b, 0xe7, 0xdf, 0xf7, 0xbb, 0xdf, 0x38, 0xff, 0xbe, 0x3f, 0xf8, 0xc6, 0xf9, 0xf7, - 0x7d, 0xd7, 0xfd, 0xf3, 0xd6, 0x6f, 0xdd, 0x3f, 0x6f, 0xfd, 0xee, 0xfd, 0xf3, 0xd6, 0x1f, 0xdc, - 0x3f, 0x6f, 0xfd, 0xf1, 0xfd, 0xf3, 0xd6, 0x17, 0xff, 0xe4, 0xfc, 0xfb, 0xde, 0x44, 0x71, 0x00, - 0xd1, 0xcb, 0x74, 0x74, 0x2e, 0xef, 0x5e, 0xf9, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbd, 0x0b, - 0x0a, 0x3d, 0x91, 0x13, 0x01, 0x00, + // 15465 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x69, 0x90, 0x1c, 0xc9, + 0x75, 0x18, 0xcc, 0xea, 0x9e, 0xab, 0xdf, 0xdc, 0x39, 0x00, 0x76, 0x30, 0x0b, 0xa0, 0xb1, 0xb5, + 0xbb, 0x58, 0xec, 0x35, 0x20, 0xf6, 0x20, 0x97, 0xbb, 0xcb, 0x15, 0xe7, 0x04, 0x66, 0x31, 0x33, + 0xe8, 0xcd, 0x1e, 0x00, 0xe4, 0x72, 0xc9, 0x8f, 0x85, 0xee, 0x9c, 0x99, 0xe2, 0x74, 0x57, 0xf5, + 0x56, 0x55, 0x0f, 0x30, 0xf8, 0xc8, 0x90, 0x44, 0x7d, 0xa2, 0x44, 0x4a, 0x5f, 0x04, 0xe3, 0x0b, + 0x7d, 0x47, 0x50, 0x0a, 0xc5, 0x17, 0x92, 0xac, 0xc3, 0xb4, 0x64, 0xd3, 0x94, 0x25, 0x59, 0xd4, + 0xe5, 0x2b, 0x2c, 0x29, 0x1c, 0xb2, 0xac, 0x08, 0x8b, 0x0a, 0x2b, 0x3c, 0x32, 0x21, 0x47, 0x28, + 0xf4, 0xc3, 0x92, 0x7c, 0xfc, 0xb0, 0x61, 0xd9, 0x72, 0xe4, 0x59, 0x99, 0x75, 0x74, 0xf7, 0x60, + 0x07, 0xc3, 0x25, 0x63, 0xff, 0x75, 0xbf, 0xf7, 0xf2, 0x65, 0x56, 0x9e, 0x2f, 0xdf, 0x7b, 0xf9, + 0x1e, 0xbc, 0xb2, 0xf3, 0x52, 0x38, 0xeb, 0xfa, 0x17, 0x76, 0xda, 0x37, 0x49, 0xe0, 0x91, 0x88, + 0x84, 0x17, 0x76, 0x89, 0x57, 0xf7, 0x83, 0x0b, 0x02, 0xe1, 0xb4, 0xdc, 0x0b, 0x35, 0x3f, 0x20, + 0x17, 0x76, 0x2f, 0x5e, 0xd8, 0x22, 0x1e, 0x09, 0x9c, 0x88, 0xd4, 0x67, 0x5b, 0x81, 0x1f, 0xf9, + 0x08, 0x71, 0x9a, 0x59, 0xa7, 0xe5, 0xce, 0x52, 0x9a, 0xd9, 0xdd, 0x8b, 0x33, 0xcf, 0x6e, 0xb9, + 0xd1, 0x76, 0xfb, 0xe6, 0x6c, 0xcd, 0x6f, 0x5e, 0xd8, 0xf2, 0xb7, 0xfc, 0x0b, 0x8c, 0xf4, 0x66, + 0x7b, 0x93, 0xfd, 0x63, 0x7f, 0xd8, 0x2f, 0xce, 0x62, 0xe6, 0x85, 0xb8, 0x9a, 0xa6, 0x53, 0xdb, + 0x76, 0x3d, 0x12, 0xec, 0x5d, 0x68, 0xed, 0x6c, 0xb1, 0x7a, 0x03, 0x12, 0xfa, 0xed, 0xa0, 0x46, + 0x92, 0x15, 0x77, 0x2c, 0x15, 0x5e, 0x68, 0x92, 0xc8, 0xc9, 0x68, 0xee, 0xcc, 0x85, 0xbc, 0x52, + 0x41, 0xdb, 0x8b, 0xdc, 0x66, 0xba, 0x9a, 0x0f, 0x74, 0x2b, 0x10, 0xd6, 0xb6, 0x49, 0xd3, 0x49, + 0x95, 0x7b, 0x3e, 0xaf, 0x5c, 0x3b, 0x72, 0x1b, 0x17, 0x5c, 0x2f, 0x0a, 0xa3, 0x20, 0x59, 0xc8, + 0xfe, 0x86, 0x05, 0x67, 0xe7, 0x6e, 0x54, 0x97, 0x1a, 0x4e, 0x18, 0xb9, 0xb5, 0xf9, 0x86, 0x5f, + 0xdb, 0xa9, 0x46, 0x7e, 0x40, 0xae, 0xfb, 0x8d, 0x76, 0x93, 0x54, 0x59, 0x47, 0xa0, 0x67, 0x60, + 0x68, 0x97, 0xfd, 0x5f, 0x59, 0x9c, 0xb6, 0xce, 0x5a, 0xe7, 0x4b, 0xf3, 0x13, 0xbf, 0xbd, 
0x5f, + 0x7e, 0xdf, 0xdd, 0xfd, 0xf2, 0xd0, 0x75, 0x01, 0xc7, 0x8a, 0x02, 0x9d, 0x83, 0x81, 0xcd, 0x70, + 0x63, 0xaf, 0x45, 0xa6, 0x0b, 0x8c, 0x76, 0x4c, 0xd0, 0x0e, 0x2c, 0x57, 0x29, 0x14, 0x0b, 0x2c, + 0xba, 0x00, 0xa5, 0x96, 0x13, 0x44, 0x6e, 0xe4, 0xfa, 0xde, 0x74, 0xf1, 0xac, 0x75, 0xbe, 0x7f, + 0x7e, 0x52, 0x90, 0x96, 0x2a, 0x12, 0x81, 0x63, 0x1a, 0xda, 0x8c, 0x80, 0x38, 0xf5, 0xab, 0x5e, + 0x63, 0x6f, 0xba, 0xef, 0xac, 0x75, 0x7e, 0x28, 0x6e, 0x06, 0x16, 0x70, 0xac, 0x28, 0xec, 0x2f, + 0x17, 0x60, 0x68, 0x6e, 0x73, 0xd3, 0xf5, 0xdc, 0x68, 0x0f, 0x5d, 0x87, 0x11, 0xcf, 0xaf, 0x13, + 0xf9, 0x9f, 0x7d, 0xc5, 0xf0, 0x73, 0x67, 0x67, 0xd3, 0x53, 0x69, 0x76, 0x5d, 0xa3, 0x9b, 0x9f, + 0xb8, 0xbb, 0x5f, 0x1e, 0xd1, 0x21, 0xd8, 0xe0, 0x83, 0x30, 0x0c, 0xb7, 0xfc, 0xba, 0x62, 0x5b, + 0x60, 0x6c, 0xcb, 0x59, 0x6c, 0x2b, 0x31, 0xd9, 0xfc, 0xf8, 0xdd, 0xfd, 0xf2, 0xb0, 0x06, 0xc0, + 0x3a, 0x13, 0x74, 0x13, 0xc6, 0xe9, 0x5f, 0x2f, 0x72, 0x15, 0xdf, 0x22, 0xe3, 0xfb, 0x68, 0x1e, + 0x5f, 0x8d, 0x74, 0x7e, 0xea, 0xee, 0x7e, 0x79, 0x3c, 0x01, 0xc4, 0x49, 0x86, 0xf6, 0x1d, 0x18, + 0x9b, 0x8b, 0x22, 0xa7, 0xb6, 0x4d, 0xea, 0x7c, 0x04, 0xd1, 0x0b, 0xd0, 0xe7, 0x39, 0x4d, 0x22, + 0xc6, 0xf7, 0xac, 0xe8, 0xd8, 0xbe, 0x75, 0xa7, 0x49, 0xee, 0xed, 0x97, 0x27, 0xae, 0x79, 0xee, + 0xdb, 0x6d, 0x31, 0x2b, 0x28, 0x0c, 0x33, 0x6a, 0xf4, 0x1c, 0x40, 0x9d, 0xec, 0xba, 0x35, 0x52, + 0x71, 0xa2, 0x6d, 0x31, 0xde, 0x48, 0x94, 0x85, 0x45, 0x85, 0xc1, 0x1a, 0x95, 0x7d, 0x1b, 0x4a, + 0x73, 0xbb, 0xbe, 0x5b, 0xaf, 0xf8, 0xf5, 0x10, 0xed, 0xc0, 0x78, 0x2b, 0x20, 0x9b, 0x24, 0x50, + 0xa0, 0x69, 0xeb, 0x6c, 0xf1, 0xfc, 0xf0, 0x73, 0xe7, 0x33, 0x3f, 0xd6, 0x24, 0x5d, 0xf2, 0xa2, + 0x60, 0x6f, 0xfe, 0x21, 0x51, 0xdf, 0x78, 0x02, 0x8b, 0x93, 0x9c, 0xed, 0x7f, 0x5a, 0x80, 0xe3, + 0x73, 0x77, 0xda, 0x01, 0x59, 0x74, 0xc3, 0x9d, 0xe4, 0x0c, 0xaf, 0xbb, 0xe1, 0xce, 0x7a, 0xdc, + 0x03, 0x6a, 0x6a, 0x2d, 0x0a, 0x38, 0x56, 0x14, 0xe8, 0x59, 0x18, 0xa4, 0xbf, 0xaf, 0xe1, 0x15, + 0xf1, 0xc9, 0x53, 0x82, 0x78, 0x78, 0xd1, 0x89, 0x9c, 0x45, 0x8e, 0xc2, 0x92, 0x06, 0xad, 0xc1, + 0x70, 0x8d, 0x2d, 0xc8, 0xad, 0x35, 0xbf, 0x4e, 0xd8, 0x60, 0x96, 0xe6, 0x9f, 0xa6, 0xe4, 0x0b, + 0x31, 0xf8, 0xde, 0x7e, 0x79, 0x9a, 0xb7, 0x4d, 0xb0, 0xd0, 0x70, 0x58, 0x2f, 0x8f, 0x6c, 0xb5, + 0xbe, 0xfa, 0x18, 0x27, 0xc8, 0x58, 0x5b, 0xe7, 0xb5, 0xa5, 0xd2, 0xcf, 0x96, 0xca, 0x48, 0xf6, + 0x32, 0x41, 0x17, 0xa1, 0x6f, 0xc7, 0xf5, 0xea, 0xd3, 0x03, 0x8c, 0xd7, 0x69, 0x3a, 0xe6, 0x57, + 0x5c, 0xaf, 0x7e, 0x6f, 0xbf, 0x3c, 0x69, 0x34, 0x87, 0x02, 0x31, 0x23, 0xb5, 0xff, 0xb3, 0x05, + 0x65, 0x86, 0x5b, 0x76, 0x1b, 0xa4, 0x42, 0x82, 0xd0, 0x0d, 0x23, 0xe2, 0x45, 0x46, 0x87, 0x3e, + 0x07, 0x10, 0x92, 0x5a, 0x40, 0x22, 0xad, 0x4b, 0xd5, 0xc4, 0xa8, 0x2a, 0x0c, 0xd6, 0xa8, 0xe8, + 0x86, 0x10, 0x6e, 0x3b, 0x01, 0x9b, 0x5f, 0xa2, 0x63, 0xd5, 0x86, 0x50, 0x95, 0x08, 0x1c, 0xd3, + 0x18, 0x1b, 0x42, 0xb1, 0xdb, 0x86, 0x80, 0x3e, 0x0c, 0xe3, 0x71, 0x65, 0x61, 0xcb, 0xa9, 0xc9, + 0x0e, 0x64, 0x4b, 0xa6, 0x6a, 0xa2, 0x70, 0x92, 0xd6, 0xfe, 0xdb, 0x96, 0x98, 0x3c, 0xf4, 0xab, + 0xdf, 0xe5, 0xdf, 0x6a, 0xff, 0x8a, 0x05, 0x83, 0xf3, 0xae, 0x57, 0x77, 0xbd, 0x2d, 0xf4, 0x29, + 0x18, 0xa2, 0x67, 0x53, 0xdd, 0x89, 0x1c, 0xb1, 0xef, 0xbd, 0x5f, 0x5b, 0x5b, 0xea, 0xa8, 0x98, + 0x6d, 0xed, 0x6c, 0x51, 0x40, 0x38, 0x4b, 0xa9, 0xe9, 0x6a, 0xbb, 0x7a, 0xf3, 0xd3, 0xa4, 0x16, + 0xad, 0x91, 0xc8, 0x89, 0x3f, 0x27, 0x86, 0x61, 0xc5, 0x15, 0x5d, 0x81, 0x81, 0xc8, 0x09, 0xb6, + 0x48, 0x24, 0x36, 0xc0, 0xcc, 0x8d, 0x8a, 0x97, 0xc4, 0x74, 0x45, 0x12, 0xaf, 0x46, 0xe2, 0x63, + 0x61, 0x83, 0x15, 
0xc5, 0x82, 0x85, 0xfd, 0x3f, 0x06, 0xe1, 0xe4, 0x42, 0x75, 0x25, 0x67, 0x5e, + 0x9d, 0x83, 0x81, 0x7a, 0xe0, 0xee, 0x92, 0x40, 0xf4, 0xb3, 0xe2, 0xb2, 0xc8, 0xa0, 0x58, 0x60, + 0xd1, 0x4b, 0x30, 0xc2, 0x0f, 0xa4, 0xcb, 0x8e, 0x57, 0x6f, 0xc8, 0x2e, 0x3e, 0x26, 0xa8, 0x47, + 0xae, 0x6b, 0x38, 0x6c, 0x50, 0x1e, 0x70, 0x52, 0x9d, 0x4b, 0x2c, 0xc6, 0xbc, 0xc3, 0xee, 0x0b, + 0x16, 0x4c, 0xf0, 0x6a, 0xe6, 0xa2, 0x28, 0x70, 0x6f, 0xb6, 0x23, 0x12, 0x4e, 0xf7, 0xb3, 0x9d, + 0x6e, 0x21, 0xab, 0xb7, 0x72, 0x7b, 0x60, 0xf6, 0x7a, 0x82, 0x0b, 0xdf, 0x04, 0xa7, 0x45, 0xbd, + 0x13, 0x49, 0x34, 0x4e, 0x55, 0x8b, 0xbe, 0xcf, 0x82, 0x99, 0x9a, 0xef, 0x45, 0x81, 0xdf, 0x68, + 0x90, 0xa0, 0xd2, 0xbe, 0xd9, 0x70, 0xc3, 0x6d, 0x3e, 0x4f, 0x31, 0xd9, 0x64, 0x3b, 0x41, 0xce, + 0x18, 0x2a, 0x22, 0x31, 0x86, 0x67, 0xee, 0xee, 0x97, 0x67, 0x16, 0x72, 0x59, 0xe1, 0x0e, 0xd5, + 0xa0, 0x1d, 0x40, 0xf4, 0x28, 0xad, 0x46, 0xce, 0x16, 0x89, 0x2b, 0x1f, 0xec, 0xbd, 0xf2, 0x13, + 0x77, 0xf7, 0xcb, 0x68, 0x3d, 0xc5, 0x02, 0x67, 0xb0, 0x45, 0x6f, 0xc3, 0x31, 0x0a, 0x4d, 0x7d, + 0xeb, 0x50, 0xef, 0xd5, 0x4d, 0xdf, 0xdd, 0x2f, 0x1f, 0x5b, 0xcf, 0x60, 0x82, 0x33, 0x59, 0xa3, + 0xef, 0xb1, 0xe0, 0x64, 0xfc, 0xf9, 0x4b, 0xb7, 0x5b, 0x8e, 0x57, 0x8f, 0x2b, 0x2e, 0xf5, 0x5e, + 0x31, 0xdd, 0x93, 0x4f, 0x2e, 0xe4, 0x71, 0xc2, 0xf9, 0x95, 0x20, 0x0f, 0xa6, 0x68, 0xd3, 0x92, + 0x75, 0x43, 0xef, 0x75, 0x3f, 0x74, 0x77, 0xbf, 0x3c, 0xb5, 0x9e, 0xe6, 0x81, 0xb3, 0x18, 0xcf, + 0x2c, 0xc0, 0xf1, 0xcc, 0xd9, 0x89, 0x26, 0xa0, 0xb8, 0x43, 0xb8, 0xd4, 0x55, 0xc2, 0xf4, 0x27, + 0x3a, 0x06, 0xfd, 0xbb, 0x4e, 0xa3, 0x2d, 0x16, 0x26, 0xe6, 0x7f, 0x5e, 0x2e, 0xbc, 0x64, 0xd9, + 0xff, 0xac, 0x08, 0xe3, 0x0b, 0xd5, 0x95, 0xfb, 0x5a, 0xf5, 0xfa, 0xb1, 0x57, 0xe8, 0x78, 0xec, + 0xc5, 0x87, 0x68, 0x31, 0xf7, 0x10, 0xfd, 0xee, 0x8c, 0x25, 0xdb, 0xc7, 0x96, 0xec, 0x87, 0x72, + 0x96, 0xec, 0x21, 0x2f, 0xd4, 0xdd, 0x9c, 0x59, 0xdb, 0xcf, 0x06, 0x30, 0x53, 0x42, 0x5a, 0xf5, + 0x6b, 0x4e, 0x23, 0xb9, 0xd5, 0x1e, 0x70, 0xea, 0x1e, 0xce, 0x38, 0xd6, 0x60, 0x64, 0xc1, 0x69, + 0x39, 0x37, 0xdd, 0x86, 0x1b, 0xb9, 0x24, 0x44, 0x4f, 0x40, 0xd1, 0xa9, 0xd7, 0x99, 0x74, 0x57, + 0x9a, 0x3f, 0x7e, 0x77, 0xbf, 0x5c, 0x9c, 0xab, 0x53, 0x31, 0x03, 0x14, 0xd5, 0x1e, 0xa6, 0x14, + 0xe8, 0x29, 0xe8, 0xab, 0x07, 0x7e, 0x6b, 0xba, 0xc0, 0x28, 0xe9, 0x2a, 0xef, 0x5b, 0x0c, 0xfc, + 0x56, 0x82, 0x94, 0xd1, 0xd8, 0xbf, 0x55, 0x80, 0x53, 0x0b, 0xa4, 0xb5, 0xbd, 0x5c, 0xcd, 0x39, + 0x2f, 0xce, 0xc3, 0x50, 0xd3, 0xf7, 0xdc, 0xc8, 0x0f, 0x42, 0x51, 0x35, 0x9b, 0x11, 0x6b, 0x02, + 0x86, 0x15, 0x16, 0x9d, 0x85, 0xbe, 0x56, 0x2c, 0xc4, 0x8e, 0x48, 0x01, 0x98, 0x89, 0xaf, 0x0c, + 0x43, 0x29, 0xda, 0x21, 0x09, 0xc4, 0x8c, 0x51, 0x14, 0xd7, 0x42, 0x12, 0x60, 0x86, 0x89, 0x25, + 0x01, 0x2a, 0x23, 0x88, 0x13, 0x21, 0x21, 0x09, 0x50, 0x0c, 0xd6, 0xa8, 0x50, 0x05, 0x4a, 0x61, + 0x62, 0x64, 0x7b, 0x5a, 0x9a, 0xa3, 0x4c, 0x54, 0x50, 0x23, 0x19, 0x33, 0x31, 0x4e, 0xb0, 0x81, + 0xae, 0xa2, 0xc2, 0xd7, 0x0b, 0x80, 0x78, 0x17, 0x7e, 0x9b, 0x75, 0xdc, 0xb5, 0x74, 0xc7, 0xf5, + 0xbe, 0x24, 0x0e, 0xab, 0xf7, 0xfe, 0x8b, 0x05, 0xa7, 0x16, 0x5c, 0xaf, 0x4e, 0x82, 0x9c, 0x09, + 0xf8, 0x60, 0xee, 0xce, 0x07, 0x13, 0x52, 0x8c, 0x29, 0xd6, 0x77, 0x08, 0x53, 0xcc, 0xfe, 0x4b, + 0x0b, 0x10, 0xff, 0xec, 0x77, 0xdd, 0xc7, 0x5e, 0x4b, 0x7f, 0xec, 0x21, 0x4c, 0x0b, 0xfb, 0xef, + 0x5a, 0x30, 0xbc, 0xd0, 0x70, 0xdc, 0xa6, 0xf8, 0xd4, 0x05, 0x98, 0x94, 0x8a, 0x22, 0x06, 0xd6, + 0x64, 0x7f, 0xba, 0xb9, 0x4d, 0xe2, 0x24, 0x12, 0xa7, 0xe9, 0xd1, 0xc7, 0xe1, 0xa4, 0x01, 0xdc, + 0x20, 0xcd, 0x56, 0xc3, 0x89, 0xf4, 0x5b, 
0x01, 0x3b, 0xfd, 0x71, 0x1e, 0x11, 0xce, 0x2f, 0x6f, + 0xaf, 0xc2, 0xd8, 0x42, 0xc3, 0x25, 0x5e, 0xb4, 0x52, 0x59, 0xf0, 0xbd, 0x4d, 0x77, 0x0b, 0xbd, + 0x0c, 0x63, 0x91, 0xdb, 0x24, 0x7e, 0x3b, 0xaa, 0x92, 0x9a, 0xef, 0xb1, 0xbb, 0xb6, 0x75, 0xbe, + 0x7f, 0x1e, 0xdd, 0xdd, 0x2f, 0x8f, 0x6d, 0x18, 0x18, 0x9c, 0xa0, 0xb4, 0x7f, 0x9a, 0xee, 0xb4, + 0x8d, 0x76, 0x18, 0x91, 0x60, 0x23, 0x68, 0x87, 0xd1, 0x7c, 0x9b, 0x4a, 0xcb, 0x95, 0xc0, 0xa7, + 0x1d, 0xe8, 0xfa, 0x1e, 0x3a, 0x65, 0x28, 0x10, 0x86, 0xa4, 0xf2, 0x40, 0x28, 0x0a, 0x66, 0x01, + 0x42, 0x77, 0xcb, 0x23, 0x81, 0xf6, 0x69, 0x63, 0x6c, 0x71, 0x2b, 0x28, 0xd6, 0x28, 0x50, 0x03, + 0x46, 0x1b, 0xce, 0x4d, 0xd2, 0xa8, 0x92, 0x06, 0xa9, 0x45, 0x7e, 0x20, 0x54, 0x20, 0xcf, 0xf7, + 0x76, 0x73, 0x59, 0xd5, 0x8b, 0xce, 0x4f, 0xde, 0xdd, 0x2f, 0x8f, 0x1a, 0x20, 0x6c, 0x32, 0xa7, + 0x9b, 0x9d, 0xdf, 0xa2, 0x5f, 0xe1, 0x34, 0xf4, 0xeb, 0xf2, 0x55, 0x01, 0xc3, 0x0a, 0xab, 0x36, + 0xbb, 0xbe, 0xbc, 0xcd, 0xce, 0xfe, 0x63, 0xba, 0x34, 0xfc, 0x66, 0xcb, 0xf7, 0x88, 0x17, 0x2d, + 0xf8, 0x5e, 0x9d, 0x2b, 0xaf, 0x5e, 0x86, 0xbe, 0x88, 0x4e, 0x75, 0xde, 0x3d, 0xe7, 0x64, 0x41, + 0x3a, 0xc1, 0xef, 0xed, 0x97, 0x4f, 0xa4, 0x4b, 0xb0, 0x25, 0xc0, 0xca, 0xa0, 0x0f, 0xc1, 0x40, + 0x18, 0x39, 0x51, 0x3b, 0x14, 0x1d, 0xf7, 0x88, 0x5c, 0x28, 0x55, 0x06, 0xbd, 0xb7, 0x5f, 0x1e, + 0x57, 0xc5, 0x38, 0x08, 0x8b, 0x02, 0xe8, 0x49, 0x18, 0x6c, 0x92, 0x30, 0x74, 0xb6, 0xa4, 0xa0, + 0x33, 0x2e, 0xca, 0x0e, 0xae, 0x71, 0x30, 0x96, 0x78, 0xf4, 0x28, 0xf4, 0x93, 0x20, 0xf0, 0x03, + 0xf1, 0x6d, 0xa3, 0x82, 0xb0, 0x7f, 0x89, 0x02, 0x31, 0xc7, 0xd9, 0xff, 0xd2, 0x82, 0x71, 0xd5, + 0x56, 0x5e, 0xd7, 0x11, 0x5c, 0x30, 0xdf, 0x04, 0xa8, 0xc9, 0x0f, 0x0c, 0x99, 0x60, 0x30, 0xfc, + 0xdc, 0xb9, 0x4c, 0x19, 0x2c, 0xd5, 0x8d, 0x31, 0x67, 0x05, 0x0a, 0xb1, 0xc6, 0xcd, 0xfe, 0x75, + 0x0b, 0xa6, 0x12, 0x5f, 0xb4, 0xea, 0x86, 0x11, 0x7a, 0x2b, 0xf5, 0x55, 0xb3, 0x3d, 0x4e, 0x3e, + 0x37, 0xe4, 0xdf, 0xa4, 0x76, 0x29, 0x09, 0xd1, 0xbe, 0xe8, 0x32, 0xf4, 0xbb, 0x11, 0x69, 0xca, + 0x8f, 0x79, 0xb4, 0xe3, 0xc7, 0xf0, 0x56, 0xc5, 0x23, 0xb2, 0x42, 0x4b, 0x62, 0xce, 0xc0, 0xfe, + 0xad, 0x22, 0x94, 0xf8, 0xfa, 0x5e, 0x73, 0x5a, 0x47, 0x30, 0x16, 0x4f, 0x43, 0xc9, 0x6d, 0x36, + 0xdb, 0x91, 0x73, 0x53, 0x9c, 0xd4, 0x43, 0x7c, 0xd7, 0x5c, 0x91, 0x40, 0x1c, 0xe3, 0xd1, 0x0a, + 0xf4, 0xb1, 0xa6, 0xf0, 0xaf, 0x7c, 0x22, 0xfb, 0x2b, 0x45, 0xdb, 0x67, 0x17, 0x9d, 0xc8, 0xe1, + 0x42, 0xb2, 0x5a, 0x57, 0x14, 0x84, 0x19, 0x0b, 0xe4, 0x00, 0xdc, 0x74, 0x3d, 0x27, 0xd8, 0xa3, + 0xb0, 0xe9, 0x22, 0x63, 0xf8, 0x6c, 0x67, 0x86, 0xf3, 0x8a, 0x9e, 0xb3, 0x55, 0x1f, 0x16, 0x23, + 0xb0, 0xc6, 0x74, 0xe6, 0x83, 0x50, 0x52, 0xc4, 0x07, 0x91, 0x75, 0x67, 0x3e, 0x0c, 0xe3, 0x89, + 0xba, 0xba, 0x15, 0x1f, 0xd1, 0x45, 0xe5, 0x5f, 0x65, 0x5b, 0x86, 0x68, 0xf5, 0x92, 0xb7, 0x2b, + 0x8e, 0x98, 0x3b, 0x70, 0xac, 0x91, 0x71, 0x48, 0x89, 0x71, 0xed, 0xfd, 0x50, 0x3b, 0x25, 0x3e, + 0xfb, 0x58, 0x16, 0x16, 0x67, 0xd6, 0x61, 0xec, 0x88, 0x85, 0x4e, 0x3b, 0x22, 0xdd, 0xef, 0x8e, + 0xa9, 0xc6, 0x5f, 0x21, 0x7b, 0x6a, 0x53, 0xfd, 0x56, 0x36, 0xff, 0x34, 0xef, 0x7d, 0xbe, 0x5d, + 0x0e, 0x0b, 0x06, 0xc5, 0x2b, 0x64, 0x8f, 0x0f, 0x85, 0xfe, 0x75, 0xc5, 0x8e, 0x5f, 0xf7, 0x55, + 0x0b, 0x46, 0xd5, 0xd7, 0x1d, 0xc1, 0xbe, 0x30, 0x6f, 0xee, 0x0b, 0xa7, 0x3b, 0x4e, 0xf0, 0x9c, + 0x1d, 0xe1, 0xeb, 0x05, 0x38, 0xa9, 0x68, 0xe8, 0xb5, 0x8f, 0xff, 0x11, 0xb3, 0xea, 0x02, 0x94, + 0x3c, 0xa5, 0x00, 0xb5, 0x4c, 0xcd, 0x63, 0xac, 0xfe, 0x8c, 0x69, 0xe8, 0x91, 0xe7, 0xc5, 0x87, + 0xf6, 0x88, 0x6e, 0x19, 0x10, 0x87, 0xfb, 0x3c, 0x14, 0xdb, 0x6e, 
0x5d, 0x1c, 0x30, 0xef, 0x97, + 0xbd, 0x7d, 0x6d, 0x65, 0xf1, 0xde, 0x7e, 0xf9, 0x91, 0x3c, 0xab, 0x14, 0x3d, 0xd9, 0xc2, 0xd9, + 0x6b, 0x2b, 0x8b, 0x98, 0x16, 0x46, 0x73, 0x30, 0x2e, 0x45, 0x99, 0xeb, 0x54, 0x92, 0xf6, 0x3d, + 0x71, 0x0e, 0x29, 0xf5, 0x3e, 0x36, 0xd1, 0x38, 0x49, 0x8f, 0x16, 0x61, 0x62, 0xa7, 0x7d, 0x93, + 0x34, 0x48, 0xc4, 0x3f, 0xf8, 0x0a, 0xe1, 0xca, 0xef, 0x52, 0x7c, 0xe9, 0xbe, 0x92, 0xc0, 0xe3, + 0x54, 0x09, 0xfb, 0x6f, 0xd8, 0x79, 0x20, 0x7a, 0x4f, 0x93, 0x6f, 0xbe, 0x95, 0xd3, 0xb9, 0x97, + 0x59, 0x71, 0x85, 0xec, 0x6d, 0xf8, 0x54, 0x0e, 0xc9, 0x9e, 0x15, 0xc6, 0x9c, 0xef, 0xeb, 0x38, + 0xe7, 0x7f, 0xb1, 0x00, 0xc7, 0x55, 0x0f, 0x18, 0xf2, 0xfd, 0xb7, 0x7b, 0x1f, 0x5c, 0x84, 0xe1, + 0x3a, 0xd9, 0x74, 0xda, 0x8d, 0x48, 0x59, 0x62, 0xfa, 0xb9, 0x35, 0x6e, 0x31, 0x06, 0x63, 0x9d, + 0xe6, 0x00, 0xdd, 0xf6, 0xf3, 0xa3, 0xec, 0x20, 0x8e, 0x1c, 0x3a, 0xc7, 0xd5, 0xaa, 0xb1, 0x72, + 0x57, 0xcd, 0xa3, 0xd0, 0xef, 0x36, 0xa9, 0x60, 0x56, 0x30, 0xe5, 0xad, 0x15, 0x0a, 0xc4, 0x1c, + 0x87, 0x1e, 0x87, 0xc1, 0x9a, 0xdf, 0x6c, 0x3a, 0x5e, 0x9d, 0x1d, 0x79, 0xa5, 0xf9, 0x61, 0x2a, + 0xbb, 0x2d, 0x70, 0x10, 0x96, 0x38, 0x2a, 0x7c, 0x3b, 0xc1, 0x16, 0x57, 0x4f, 0x09, 0xe1, 0x7b, + 0x2e, 0xd8, 0x0a, 0x31, 0x83, 0xd2, 0xdb, 0xf5, 0x2d, 0x3f, 0xd8, 0x71, 0xbd, 0xad, 0x45, 0x37, + 0x10, 0x4b, 0x42, 0x9d, 0x85, 0x37, 0x14, 0x06, 0x6b, 0x54, 0x68, 0x19, 0xfa, 0x5b, 0x7e, 0x10, + 0x85, 0xd3, 0x03, 0xac, 0xbb, 0x1f, 0xc9, 0xd9, 0x88, 0xf8, 0xd7, 0x56, 0xfc, 0x20, 0x8a, 0x3f, + 0x80, 0xfe, 0x0b, 0x31, 0x2f, 0x8e, 0x56, 0x61, 0x90, 0x78, 0xbb, 0xcb, 0x81, 0xdf, 0x9c, 0x9e, + 0xca, 0xe7, 0xb4, 0xc4, 0x49, 0xf8, 0x34, 0x8b, 0x65, 0x54, 0x01, 0xc6, 0x92, 0x05, 0xfa, 0x10, + 0x14, 0x89, 0xb7, 0x3b, 0x3d, 0xc8, 0x38, 0xcd, 0xe4, 0x70, 0xba, 0xee, 0x04, 0xf1, 0x9e, 0xbf, + 0xe4, 0xed, 0x62, 0x5a, 0x06, 0x7d, 0x0c, 0x4a, 0x72, 0xc3, 0x08, 0x85, 0xde, 0x37, 0x73, 0xc2, + 0xca, 0x6d, 0x06, 0x93, 0xb7, 0xdb, 0x6e, 0x40, 0x9a, 0xc4, 0x8b, 0xc2, 0x78, 0x87, 0x94, 0xd8, + 0x10, 0xc7, 0xdc, 0x50, 0x0d, 0x46, 0x02, 0x12, 0xba, 0x77, 0x48, 0xc5, 0x6f, 0xb8, 0xb5, 0xbd, + 0xe9, 0x87, 0x58, 0xf3, 0x9e, 0xec, 0xd8, 0x65, 0x58, 0x2b, 0x10, 0xdb, 0x25, 0x74, 0x28, 0x36, + 0x98, 0xa2, 0x37, 0x60, 0x34, 0x20, 0x61, 0xe4, 0x04, 0x91, 0xa8, 0x65, 0x5a, 0xd9, 0x11, 0x47, + 0xb1, 0x8e, 0xe0, 0xd7, 0x89, 0xb8, 0x9a, 0x18, 0x83, 0x4d, 0x0e, 0xe8, 0x63, 0xd2, 0x48, 0xb2, + 0xe6, 0xb7, 0xbd, 0x28, 0x9c, 0x2e, 0xb1, 0x76, 0x67, 0x9a, 0xaf, 0xaf, 0xc7, 0x74, 0x49, 0x2b, + 0x0a, 0x2f, 0x8c, 0x0d, 0x56, 0xe8, 0x13, 0x30, 0xca, 0xff, 0x73, 0x23, 0x70, 0x38, 0x7d, 0x9c, + 0xf1, 0x3e, 0x9b, 0xcf, 0x9b, 0x13, 0xce, 0x1f, 0x17, 0xcc, 0x47, 0x75, 0x68, 0x88, 0x4d, 0x6e, + 0x08, 0xc3, 0x68, 0xc3, 0xdd, 0x25, 0x1e, 0x09, 0xc3, 0x4a, 0xe0, 0xdf, 0x24, 0x42, 0xa7, 0x7d, + 0x32, 0xdb, 0x68, 0xec, 0xdf, 0x24, 0xe2, 0x12, 0xa8, 0x97, 0xc1, 0x26, 0x0b, 0x74, 0x0d, 0xc6, + 0x02, 0xe2, 0xd4, 0xdd, 0x98, 0xe9, 0x70, 0x37, 0xa6, 0xec, 0xe2, 0x8c, 0x8d, 0x42, 0x38, 0xc1, + 0x04, 0x5d, 0x85, 0x11, 0xd6, 0xe7, 0xed, 0x16, 0x67, 0x7a, 0xa2, 0x1b, 0x53, 0xe6, 0x73, 0x50, + 0xd5, 0x8a, 0x60, 0x83, 0x01, 0x7a, 0x1d, 0x4a, 0x0d, 0x77, 0x93, 0xd4, 0xf6, 0x6a, 0x0d, 0x32, + 0x3d, 0xc2, 0xb8, 0x65, 0x6e, 0x86, 0xab, 0x92, 0x88, 0xcb, 0xe7, 0xea, 0x2f, 0x8e, 0x8b, 0xa3, + 0xeb, 0x70, 0x22, 0x22, 0x41, 0xd3, 0xf5, 0x1c, 0xba, 0x89, 0x89, 0x2b, 0x21, 0xb3, 0xe5, 0x8f, + 0xb2, 0xd9, 0x75, 0x46, 0x8c, 0xc6, 0x89, 0x8d, 0x4c, 0x2a, 0x9c, 0x53, 0x1a, 0xdd, 0x86, 0xe9, + 0x0c, 0x0c, 0x9f, 0xb7, 0xc7, 0x18, 0xe7, 0x57, 0x05, 0xe7, 0xe9, 0x8d, 0x1c, 0xba, 0x7b, 
0x1d, + 0x70, 0x38, 0x97, 0x3b, 0xba, 0x0a, 0xe3, 0x6c, 0xe7, 0xac, 0xb4, 0x1b, 0x0d, 0x51, 0xe1, 0x18, + 0xab, 0xf0, 0x71, 0x29, 0x47, 0xac, 0x98, 0xe8, 0x7b, 0xfb, 0x65, 0x88, 0xff, 0xe1, 0x64, 0x69, + 0x74, 0x93, 0x99, 0x8d, 0xdb, 0x81, 0x1b, 0xed, 0xd1, 0x55, 0x45, 0x6e, 0x47, 0xd3, 0xe3, 0x1d, + 0x55, 0x68, 0x3a, 0xa9, 0xb2, 0x2d, 0xeb, 0x40, 0x9c, 0x64, 0x48, 0x8f, 0x82, 0x30, 0xaa, 0xbb, + 0xde, 0xf4, 0x04, 0xbf, 0x4f, 0xc9, 0x9d, 0xb4, 0x4a, 0x81, 0x98, 0xe3, 0x98, 0xc9, 0x98, 0xfe, + 0xb8, 0x4a, 0x4f, 0xdc, 0x49, 0x46, 0x18, 0x9b, 0x8c, 0x25, 0x02, 0xc7, 0x34, 0x54, 0x08, 0x8e, + 0xa2, 0xbd, 0x69, 0xc4, 0x48, 0xd5, 0x86, 0xb8, 0xb1, 0xf1, 0x31, 0x4c, 0xe1, 0xf6, 0x4d, 0x18, + 0x53, 0xdb, 0x04, 0xeb, 0x13, 0x54, 0x86, 0x7e, 0x26, 0xf6, 0x09, 0x85, 0x6f, 0x89, 0x36, 0x81, + 0x89, 0x84, 0x98, 0xc3, 0x59, 0x13, 0xdc, 0x3b, 0x64, 0x7e, 0x2f, 0x22, 0x5c, 0x17, 0x51, 0xd4, + 0x9a, 0x20, 0x11, 0x38, 0xa6, 0xb1, 0xff, 0x27, 0x17, 0x9f, 0xe3, 0x53, 0xa2, 0x87, 0x73, 0xf1, + 0x19, 0x18, 0xda, 0xf6, 0xc3, 0x88, 0x52, 0xb3, 0x3a, 0xfa, 0x63, 0x81, 0xf9, 0xb2, 0x80, 0x63, + 0x45, 0x81, 0x5e, 0x81, 0xd1, 0x9a, 0x5e, 0x81, 0x38, 0xd4, 0xd5, 0x36, 0x62, 0xd4, 0x8e, 0x4d, + 0x5a, 0xf4, 0x12, 0x0c, 0x31, 0x37, 0xa8, 0x9a, 0xdf, 0x10, 0xd2, 0xa6, 0x94, 0x4c, 0x86, 0x2a, + 0x02, 0x7e, 0x4f, 0xfb, 0x8d, 0x15, 0x35, 0x3a, 0x07, 0x03, 0xb4, 0x09, 0x2b, 0x15, 0x71, 0x9c, + 0x2a, 0xdd, 0xe5, 0x65, 0x06, 0xc5, 0x02, 0x6b, 0xff, 0xba, 0xc5, 0x64, 0xa9, 0xf4, 0x9e, 0x8f, + 0x2e, 0xb3, 0x43, 0x83, 0x9d, 0x20, 0x9a, 0xee, 0xf0, 0x31, 0xed, 0x24, 0x50, 0xb8, 0x7b, 0x89, + 0xff, 0xd8, 0x28, 0x89, 0xde, 0x4c, 0x9e, 0x0c, 0x5c, 0xa0, 0x78, 0x41, 0x76, 0x41, 0xf2, 0x74, + 0x78, 0x38, 0x3e, 0xe2, 0x68, 0x7b, 0x3a, 0x1d, 0x11, 0xf6, 0xff, 0x55, 0xd0, 0x66, 0x49, 0x35, + 0x72, 0x22, 0x82, 0x2a, 0x30, 0x78, 0xcb, 0x71, 0x23, 0xd7, 0xdb, 0x12, 0x72, 0x5f, 0xe7, 0x83, + 0x8e, 0x15, 0xba, 0xc1, 0x0b, 0x70, 0xe9, 0x45, 0xfc, 0xc1, 0x92, 0x0d, 0xe5, 0x18, 0xb4, 0x3d, + 0x8f, 0x72, 0x2c, 0xf4, 0xca, 0x11, 0xf3, 0x02, 0x9c, 0xa3, 0xf8, 0x83, 0x25, 0x1b, 0xf4, 0x16, + 0x80, 0xdc, 0x21, 0x48, 0x5d, 0xe8, 0x0e, 0x9f, 0xe9, 0xce, 0x74, 0x43, 0x95, 0xe1, 0xca, 0xc9, + 0xf8, 0x3f, 0xd6, 0xf8, 0xd9, 0x91, 0x36, 0xa6, 0x7a, 0x63, 0xd0, 0xc7, 0xe9, 0x12, 0x75, 0x82, + 0x88, 0xd4, 0xe7, 0x22, 0xd1, 0x39, 0x4f, 0xf5, 0x76, 0x39, 0xdc, 0x70, 0x9b, 0x44, 0x5f, 0xce, + 0x82, 0x09, 0x8e, 0xf9, 0xd9, 0xbf, 0x5c, 0x84, 0xe9, 0xbc, 0xe6, 0xd2, 0x45, 0x43, 0x6e, 0xbb, + 0xd1, 0x02, 0x15, 0x6b, 0x2d, 0x73, 0xd1, 0x2c, 0x09, 0x38, 0x56, 0x14, 0x74, 0xf6, 0x86, 0xee, + 0x96, 0xbc, 0xdb, 0xf7, 0xc7, 0xb3, 0xb7, 0xca, 0xa0, 0x58, 0x60, 0x29, 0x5d, 0x40, 0x9c, 0x50, + 0xf8, 0xe7, 0x69, 0xb3, 0x1c, 0x33, 0x28, 0x16, 0x58, 0x5d, 0xcb, 0xd8, 0xd7, 0x45, 0xcb, 0x68, + 0x74, 0x51, 0xff, 0xe1, 0x76, 0x11, 0xfa, 0x24, 0xc0, 0xa6, 0xeb, 0xb9, 0xe1, 0x36, 0xe3, 0x3e, + 0x70, 0x60, 0xee, 0x4a, 0x28, 0x5e, 0x56, 0x5c, 0xb0, 0xc6, 0x11, 0xbd, 0x08, 0xc3, 0x6a, 0x03, + 0x59, 0x59, 0x64, 0xce, 0x0a, 0x9a, 0xf3, 0x57, 0xbc, 0x9b, 0x2e, 0x62, 0x9d, 0xce, 0xfe, 0x74, + 0x72, 0xbe, 0x88, 0x15, 0xa0, 0xf5, 0xaf, 0xd5, 0x6b, 0xff, 0x16, 0x3a, 0xf7, 0xaf, 0xfd, 0xcd, + 0x01, 0x18, 0x37, 0x2a, 0x6b, 0x87, 0x3d, 0xec, 0xb9, 0x97, 0xe8, 0x01, 0xe4, 0x44, 0x44, 0xac, + 0x3f, 0xbb, 0xfb, 0x52, 0xd1, 0x0f, 0x29, 0xba, 0x02, 0x78, 0x79, 0xf4, 0x49, 0x28, 0x35, 0x9c, + 0x90, 0x69, 0x2c, 0x89, 0x58, 0x77, 0xbd, 0x30, 0x8b, 0x2f, 0x84, 0x4e, 0x18, 0x69, 0xa7, 0x3e, + 0xe7, 0x1d, 0xb3, 0xa4, 0x27, 0x25, 0x95, 0xaf, 0xa4, 0x03, 0xa8, 0x6a, 0x04, 0x15, 0xc2, 0xf6, + 0x30, 0xc7, 0xa1, 
0x97, 0xd8, 0xd6, 0x4a, 0x67, 0xc5, 0x02, 0x95, 0x46, 0xd9, 0x34, 0xeb, 0x37, + 0x84, 0x6c, 0x85, 0xc3, 0x06, 0x65, 0x7c, 0x27, 0x1b, 0xe8, 0x70, 0x27, 0x7b, 0x12, 0x06, 0xd9, + 0x0f, 0x35, 0x03, 0xd4, 0x68, 0xac, 0x70, 0x30, 0x96, 0xf8, 0xe4, 0x84, 0x19, 0xea, 0x6d, 0xc2, + 0xd0, 0x5b, 0x9f, 0x98, 0xd4, 0xcc, 0x51, 0x64, 0x88, 0xef, 0x72, 0x62, 0xca, 0x63, 0x89, 0x43, + 0x3f, 0x63, 0x01, 0x72, 0x1a, 0xf4, 0xb6, 0x4c, 0xc1, 0xea, 0x72, 0x03, 0x4c, 0xd4, 0x7e, 0xa5, + 0x6b, 0xb7, 0xb7, 0xc3, 0xd9, 0xb9, 0x54, 0x69, 0xae, 0x29, 0x7d, 0x59, 0x34, 0x11, 0xa5, 0x09, + 0xf4, 0xc3, 0x68, 0xd5, 0x0d, 0xa3, 0xcf, 0xfd, 0x49, 0xe2, 0x70, 0xca, 0x68, 0x12, 0xba, 0xa6, + 0x5f, 0xbe, 0x86, 0x0f, 0x78, 0xf9, 0x1a, 0xcd, 0xbb, 0x78, 0xcd, 0xb4, 0xe1, 0xa1, 0x9c, 0x2f, + 0xc8, 0xd0, 0xbf, 0x2e, 0xea, 0xfa, 0xd7, 0x2e, 0x5a, 0xbb, 0x59, 0x59, 0xc7, 0xec, 0x1b, 0x6d, + 0xc7, 0x8b, 0xdc, 0x68, 0x4f, 0xd7, 0xd7, 0x3e, 0x05, 0x63, 0x8b, 0x0e, 0x69, 0xfa, 0xde, 0x92, + 0x57, 0x6f, 0xf9, 0xae, 0x17, 0xa1, 0x69, 0xe8, 0x63, 0xc2, 0x07, 0xdf, 0x7a, 0xfb, 0x68, 0xef, + 0x61, 0x06, 0xb1, 0xb7, 0xe0, 0xf8, 0xa2, 0x7f, 0xcb, 0xbb, 0xe5, 0x04, 0xf5, 0xb9, 0xca, 0x8a, + 0xa6, 0x4f, 0x5a, 0x97, 0xfa, 0x0c, 0x2b, 0xff, 0xb6, 0xa8, 0x95, 0xe4, 0xd7, 0xa1, 0x65, 0xb7, + 0x41, 0x72, 0xb4, 0x7e, 0xff, 0x6f, 0xc1, 0xa8, 0x29, 0xa6, 0x57, 0x36, 0x2b, 0x2b, 0xd7, 0x40, + 0xff, 0x06, 0x0c, 0x6d, 0xba, 0xa4, 0x51, 0xc7, 0x64, 0x53, 0xf4, 0xce, 0x13, 0xf9, 0x2e, 0x7c, + 0xcb, 0x94, 0x52, 0x19, 0xd7, 0x98, 0x36, 0x64, 0x59, 0x14, 0xc6, 0x8a, 0x0d, 0xda, 0x81, 0x09, + 0xd9, 0x87, 0x12, 0x2b, 0xf6, 0x83, 0x27, 0x3b, 0x0d, 0xbc, 0xc9, 0xfc, 0xd8, 0xdd, 0xfd, 0xf2, + 0x04, 0x4e, 0xb0, 0xc1, 0x29, 0xc6, 0xe8, 0x14, 0xf4, 0x35, 0xe9, 0xc9, 0xd7, 0xc7, 0xba, 0x9f, + 0xa9, 0x3f, 0x98, 0x26, 0x87, 0x41, 0xed, 0x1f, 0xb3, 0xe0, 0xa1, 0x54, 0xcf, 0x08, 0x8d, 0xd6, + 0x21, 0x8f, 0x42, 0x52, 0xc3, 0x54, 0xe8, 0xae, 0x61, 0xb2, 0xff, 0x8e, 0x05, 0xc7, 0x96, 0x9a, + 0xad, 0x68, 0x6f, 0xd1, 0x35, 0xad, 0xe9, 0x1f, 0x84, 0x81, 0x26, 0xa9, 0xbb, 0xed, 0xa6, 0x18, + 0xb9, 0xb2, 0x3c, 0x1d, 0xd6, 0x18, 0xf4, 0xde, 0x7e, 0x79, 0xb4, 0x1a, 0xf9, 0x81, 0xb3, 0x45, + 0x38, 0x00, 0x0b, 0x72, 0x76, 0xc6, 0xba, 0x77, 0xc8, 0xaa, 0xdb, 0x74, 0xa3, 0xfb, 0x9b, 0xed, + 0xc2, 0x10, 0x2e, 0x99, 0xe0, 0x98, 0x9f, 0xfd, 0x0d, 0x0b, 0xc6, 0xe5, 0xbc, 0x9f, 0xab, 0xd7, + 0x03, 0x12, 0x86, 0x68, 0x06, 0x0a, 0x6e, 0x4b, 0xb4, 0x12, 0x44, 0x2b, 0x0b, 0x2b, 0x15, 0x5c, + 0x70, 0x5b, 0x52, 0x9c, 0x67, 0x07, 0x50, 0xd1, 0xf4, 0x09, 0xb8, 0x2c, 0xe0, 0x58, 0x51, 0xa0, + 0xf3, 0x30, 0xe4, 0xf9, 0x75, 0x2e, 0x11, 0x0b, 0x1b, 0x2b, 0xa5, 0x5c, 0x17, 0x30, 0xac, 0xb0, + 0xa8, 0x02, 0x25, 0xee, 0x31, 0x1a, 0x4f, 0xda, 0x9e, 0xfc, 0x4e, 0xd9, 0x97, 0x6d, 0xc8, 0x92, + 0x38, 0x66, 0x62, 0xff, 0xa6, 0x05, 0x23, 0xf2, 0xcb, 0x7a, 0xbc, 0xab, 0xd0, 0xa5, 0x15, 0xdf, + 0x53, 0xe2, 0xa5, 0x45, 0xef, 0x1a, 0x0c, 0x63, 0x5c, 0x31, 0x8a, 0x07, 0xba, 0x62, 0x5c, 0x84, + 0x61, 0xa7, 0xd5, 0xaa, 0x98, 0xf7, 0x13, 0x36, 0x95, 0xe6, 0x62, 0x30, 0xd6, 0x69, 0xec, 0x1f, + 0x2d, 0xc0, 0x98, 0xfc, 0x82, 0x6a, 0xfb, 0x66, 0x48, 0x22, 0xb4, 0x01, 0x25, 0x87, 0x8f, 0x12, + 0x91, 0x93, 0xfc, 0xd1, 0x6c, 0xbd, 0x99, 0x31, 0xa4, 0xb1, 0xa0, 0x35, 0x27, 0x4b, 0xe3, 0x98, + 0x11, 0x6a, 0xc0, 0xa4, 0xe7, 0x47, 0xec, 0xd0, 0x55, 0xf8, 0x4e, 0xa6, 0xcc, 0x24, 0xf7, 0x93, + 0x82, 0xfb, 0xe4, 0x7a, 0x92, 0x0b, 0x4e, 0x33, 0x46, 0x4b, 0x52, 0x17, 0x59, 0xcc, 0x57, 0x22, + 0xe9, 0x03, 0x97, 0xad, 0x8a, 0xb4, 0x7f, 0xcd, 0x82, 0x92, 0x24, 0x3b, 0x0a, 0xab, 0xf5, 0x1a, + 0x0c, 0x86, 0x6c, 0x10, 0x64, 0xd7, 0xd8, 
0x9d, 0x1a, 0xce, 0xc7, 0x2b, 0x96, 0x25, 0xf8, 0xff, + 0x10, 0x4b, 0x1e, 0xcc, 0x14, 0xa5, 0x9a, 0xff, 0x2e, 0x31, 0x45, 0xa9, 0xf6, 0xe4, 0x1c, 0x4a, + 0x7f, 0xc6, 0xda, 0xac, 0xe9, 0x76, 0xa9, 0xc8, 0xdb, 0x0a, 0xc8, 0xa6, 0x7b, 0x3b, 0x29, 0xf2, + 0x56, 0x18, 0x14, 0x0b, 0x2c, 0x7a, 0x0b, 0x46, 0x6a, 0xd2, 0x06, 0x11, 0xaf, 0xf0, 0x73, 0x1d, + 0xed, 0x61, 0xca, 0x74, 0xca, 0x75, 0x68, 0x0b, 0x5a, 0x79, 0x6c, 0x70, 0x33, 0x3d, 0xa2, 0x8a, + 0xdd, 0x3c, 0xa2, 0x62, 0xbe, 0xf9, 0xfe, 0x41, 0x3f, 0x6e, 0xc1, 0x00, 0xd7, 0x3d, 0xf7, 0xa6, + 0xfa, 0xd7, 0x2c, 0xc9, 0x71, 0xdf, 0x5d, 0xa7, 0x40, 0x21, 0x69, 0xa0, 0x35, 0x28, 0xb1, 0x1f, + 0x4c, 0x77, 0x5e, 0xcc, 0x7f, 0xb0, 0xc4, 0x6b, 0xd5, 0x1b, 0x78, 0x5d, 0x16, 0xc3, 0x31, 0x07, + 0xfb, 0x47, 0x8a, 0x74, 0x77, 0x8b, 0x49, 0x8d, 0x43, 0xdf, 0x7a, 0x70, 0x87, 0x7e, 0xe1, 0x41, + 0x1d, 0xfa, 0x5b, 0x30, 0x5e, 0xd3, 0xec, 0xce, 0xf1, 0x48, 0x9e, 0xef, 0x38, 0x49, 0x34, 0x13, + 0x35, 0xd7, 0xce, 0x2d, 0x98, 0x4c, 0x70, 0x92, 0x2b, 0xfa, 0x38, 0x8c, 0xf0, 0x71, 0x16, 0xb5, + 0x70, 0xa7, 0xb2, 0xc7, 0xf3, 0xe7, 0x8b, 0x5e, 0x05, 0xd7, 0xe6, 0x6a, 0xc5, 0xb1, 0xc1, 0xcc, + 0xfe, 0x2b, 0x0b, 0xd0, 0x52, 0x6b, 0x9b, 0x34, 0x49, 0xe0, 0x34, 0x62, 0xf3, 0xd1, 0x17, 0x2d, + 0x98, 0x26, 0x29, 0xf0, 0x82, 0xdf, 0x6c, 0x8a, 0xcb, 0x62, 0x8e, 0x3e, 0x63, 0x29, 0xa7, 0x8c, + 0x7a, 0xd1, 0x35, 0x9d, 0x47, 0x81, 0x73, 0xeb, 0x43, 0x6b, 0x30, 0xc5, 0x4f, 0x49, 0x85, 0xd0, + 0xbc, 0xb8, 0x1e, 0x16, 0x8c, 0xa7, 0x36, 0xd2, 0x24, 0x38, 0xab, 0x9c, 0xfd, 0x6b, 0xa3, 0x90, + 0xdb, 0x8a, 0xf7, 0xec, 0x66, 0xef, 0xd9, 0xcd, 0xde, 0xb3, 0x9b, 0xbd, 0x67, 0x37, 0x7b, 0xcf, + 0x6e, 0xf6, 0x9e, 0xdd, 0xec, 0x5d, 0x6a, 0x37, 0xfb, 0xbf, 0x2d, 0x38, 0xae, 0x8e, 0x2f, 0xe3, + 0xc2, 0xfe, 0x19, 0x98, 0xe2, 0xcb, 0xcd, 0x70, 0xc6, 0x16, 0xc7, 0xf5, 0xc5, 0xcc, 0x99, 0x9b, + 0x78, 0x34, 0x60, 0x14, 0xe4, 0xaf, 0xaf, 0x32, 0x10, 0x38, 0xab, 0x1a, 0xfb, 0x97, 0x87, 0xa0, + 0x7f, 0x69, 0x97, 0x78, 0xd1, 0x11, 0x5c, 0x6d, 0x6a, 0x30, 0xe6, 0x7a, 0xbb, 0x7e, 0x63, 0x97, + 0xd4, 0x39, 0xfe, 0x20, 0x37, 0xf0, 0x13, 0x82, 0xf5, 0xd8, 0x8a, 0xc1, 0x02, 0x27, 0x58, 0x3e, + 0x08, 0xeb, 0xc3, 0x25, 0x18, 0xe0, 0x87, 0x8f, 0x30, 0x3d, 0x64, 0xee, 0xd9, 0xac, 0x13, 0xc5, + 0x91, 0x1a, 0x5b, 0x46, 0xf8, 0xe1, 0x26, 0x8a, 0xa3, 0x4f, 0xc3, 0xd8, 0xa6, 0x1b, 0x84, 0xd1, + 0x86, 0xdb, 0xa4, 0x47, 0x43, 0xb3, 0x75, 0x1f, 0xd6, 0x06, 0xd5, 0x0f, 0xcb, 0x06, 0x27, 0x9c, + 0xe0, 0x8c, 0xb6, 0x60, 0xb4, 0xe1, 0xe8, 0x55, 0x0d, 0x1e, 0xb8, 0x2a, 0x75, 0x3a, 0xac, 0xea, + 0x8c, 0xb0, 0xc9, 0x97, 0x2e, 0xa7, 0x1a, 0x53, 0x98, 0x0f, 0x31, 0x75, 0x86, 0x5a, 0x4e, 0x5c, + 0x53, 0xce, 0x71, 0x54, 0x40, 0x63, 0x8e, 0xec, 0x25, 0x53, 0x40, 0xd3, 0xdc, 0xd5, 0x3f, 0x05, + 0x25, 0x42, 0xbb, 0x90, 0x32, 0x16, 0x07, 0xcc, 0x85, 0xde, 0xda, 0xba, 0xe6, 0xd6, 0x02, 0xdf, + 0xb4, 0xf3, 0x2c, 0x49, 0x4e, 0x38, 0x66, 0x8a, 0x16, 0x60, 0x20, 0x24, 0x81, 0xab, 0x74, 0xc9, + 0x1d, 0x86, 0x91, 0x91, 0xf1, 0xe7, 0x7d, 0xfc, 0x37, 0x16, 0x45, 0xe9, 0xf4, 0x72, 0x98, 0x2a, + 0x96, 0x1d, 0x06, 0xda, 0xf4, 0x9a, 0x63, 0x50, 0x2c, 0xb0, 0xe8, 0x75, 0x18, 0x0c, 0x48, 0x83, + 0x19, 0x12, 0x47, 0x7b, 0x9f, 0xe4, 0xdc, 0x2e, 0xc9, 0xcb, 0x61, 0xc9, 0x00, 0x5d, 0x01, 0x14, + 0x10, 0x2a, 0xe0, 0xb9, 0xde, 0x96, 0x72, 0xef, 0x16, 0x1b, 0xad, 0x12, 0xa4, 0x71, 0x4c, 0x21, + 0x5f, 0x76, 0xe2, 0x8c, 0x62, 0xe8, 0x12, 0x4c, 0x2a, 0xe8, 0x8a, 0x17, 0x46, 0x0e, 0xdd, 0xe0, + 0xc6, 0x19, 0x2f, 0xa5, 0x5f, 0xc1, 0x49, 0x02, 0x9c, 0x2e, 0x63, 0xff, 0x9c, 0x05, 0xbc, 0x9f, + 0x8f, 0x40, 0xab, 0xf0, 0x9a, 0xa9, 0x55, 0x38, 0x99, 0x3b, 0x72, 
0x39, 0x1a, 0x85, 0x9f, 0xb3, + 0x60, 0x58, 0x1b, 0xd9, 0x78, 0xce, 0x5a, 0x1d, 0xe6, 0x6c, 0x1b, 0x26, 0xe8, 0x4c, 0xbf, 0x7a, + 0x33, 0x24, 0xc1, 0x2e, 0xa9, 0xb3, 0x89, 0x59, 0xb8, 0xbf, 0x89, 0xa9, 0x5c, 0x49, 0x57, 0x13, + 0x0c, 0x71, 0xaa, 0x0a, 0xfb, 0x53, 0xb2, 0xa9, 0xca, 0xf3, 0xb6, 0xa6, 0xc6, 0x3c, 0xe1, 0x79, + 0xab, 0x46, 0x15, 0xc7, 0x34, 0x74, 0xa9, 0x6d, 0xfb, 0x61, 0x94, 0xf4, 0xbc, 0xbd, 0xec, 0x87, + 0x11, 0x66, 0x18, 0xfb, 0x79, 0x80, 0xa5, 0xdb, 0xa4, 0xc6, 0x67, 0xac, 0x7e, 0xe9, 0xb1, 0xf2, + 0x2f, 0x3d, 0xf6, 0x1f, 0x58, 0x30, 0xb6, 0xbc, 0x60, 0x9c, 0x5c, 0xb3, 0x00, 0xfc, 0xa6, 0x76, + 0xe3, 0xc6, 0xba, 0x74, 0xff, 0xe0, 0x16, 0x70, 0x05, 0xc5, 0x1a, 0x05, 0x3a, 0x09, 0xc5, 0x46, + 0xdb, 0x13, 0x6a, 0xcf, 0x41, 0x7a, 0x3c, 0xae, 0xb6, 0x3d, 0x4c, 0x61, 0xda, 0xab, 0xae, 0x62, + 0xcf, 0xaf, 0xba, 0xba, 0x46, 0x73, 0x41, 0x65, 0xe8, 0xbf, 0x75, 0xcb, 0xad, 0xf3, 0x37, 0xf3, + 0xc2, 0x35, 0xe5, 0xc6, 0x8d, 0x95, 0xc5, 0x10, 0x73, 0xb8, 0xfd, 0xa5, 0x22, 0xcc, 0x2c, 0x37, + 0xc8, 0xed, 0x77, 0x18, 0x37, 0xa0, 0xd7, 0x37, 0x69, 0x07, 0x53, 0x20, 0x1d, 0xf4, 0xdd, 0x61, + 0xf7, 0xfe, 0xd8, 0x84, 0x41, 0xee, 0x78, 0x2a, 0xa3, 0x08, 0x64, 0x9a, 0xfb, 0xf2, 0x3b, 0x64, + 0x96, 0x3b, 0xb0, 0x0a, 0x73, 0x9f, 0x3a, 0x30, 0x05, 0x14, 0x4b, 0xe6, 0x33, 0x2f, 0xc3, 0x88, + 0x4e, 0x79, 0xa0, 0x17, 0xc0, 0xdf, 0x5b, 0x84, 0x09, 0xda, 0x82, 0x07, 0x3a, 0x10, 0xd7, 0xd2, + 0x03, 0x71, 0xd8, 0xaf, 0x40, 0xbb, 0x8f, 0xc6, 0x5b, 0xc9, 0xd1, 0xb8, 0x98, 0x37, 0x1a, 0x47, + 0x3d, 0x06, 0xdf, 0x67, 0xc1, 0xd4, 0x72, 0xc3, 0xaf, 0xed, 0x24, 0x5e, 0x6a, 0xbe, 0x08, 0xc3, + 0x74, 0x3b, 0x0e, 0x8d, 0xa0, 0x25, 0x46, 0x18, 0x1b, 0x81, 0xc2, 0x3a, 0x9d, 0x56, 0xec, 0xda, + 0xb5, 0x95, 0xc5, 0xac, 0xe8, 0x37, 0x02, 0x85, 0x75, 0x3a, 0xfb, 0xf7, 0x2c, 0x38, 0x7d, 0x69, + 0x61, 0x29, 0x9e, 0x8a, 0xa9, 0x00, 0x3c, 0xe7, 0x60, 0xa0, 0x55, 0xd7, 0x9a, 0x12, 0xab, 0x85, + 0x17, 0x59, 0x2b, 0x04, 0xf6, 0xdd, 0x12, 0x5c, 0xea, 0x1a, 0xc0, 0x25, 0x5c, 0x59, 0x10, 0xfb, + 0xae, 0xb4, 0x02, 0x59, 0xb9, 0x56, 0xa0, 0xc7, 0x61, 0x90, 0x9e, 0x0b, 0x6e, 0x4d, 0xb6, 0x9b, + 0x1b, 0xf4, 0x39, 0x08, 0x4b, 0x9c, 0xfd, 0xb3, 0x16, 0x4c, 0x5d, 0x72, 0x23, 0x7a, 0x68, 0x27, + 0x23, 0xcc, 0xd0, 0x53, 0x3b, 0x74, 0x23, 0x3f, 0xd8, 0x4b, 0x46, 0x98, 0xc1, 0x0a, 0x83, 0x35, + 0x2a, 0xfe, 0x41, 0xbb, 0x2e, 0x7b, 0x49, 0x51, 0x30, 0xed, 0x6e, 0x58, 0xc0, 0xb1, 0xa2, 0xa0, + 0xfd, 0x55, 0x77, 0x03, 0xa6, 0xb2, 0xdc, 0x13, 0x1b, 0xb7, 0xea, 0xaf, 0x45, 0x89, 0xc0, 0x31, + 0x8d, 0xfd, 0x17, 0x16, 0x94, 0x2f, 0xf1, 0xf7, 0xa0, 0x9b, 0x61, 0xce, 0xa6, 0xfb, 0x3c, 0x94, + 0x88, 0x34, 0x10, 0xc8, 0xb7, 0xb1, 0x52, 0x10, 0x55, 0x96, 0x03, 0x1e, 0xe8, 0x46, 0xd1, 0xf5, + 0xf0, 0x9c, 0xfc, 0x60, 0xef, 0x81, 0x97, 0x01, 0x11, 0xbd, 0x2e, 0x3d, 0xf2, 0x0f, 0x0b, 0x21, + 0xb2, 0x94, 0xc2, 0xe2, 0x8c, 0x12, 0xf6, 0x8f, 0x59, 0x70, 0x5c, 0x7d, 0xf0, 0xbb, 0xee, 0x33, + 0xed, 0xaf, 0x15, 0x60, 0xf4, 0xf2, 0xc6, 0x46, 0xe5, 0x12, 0x89, 0xb4, 0x59, 0xd9, 0xd9, 0xec, + 0x8f, 0x35, 0xeb, 0x65, 0xa7, 0x3b, 0x62, 0x3b, 0x72, 0x1b, 0xb3, 0x3c, 0x80, 0xdc, 0xec, 0x8a, + 0x17, 0x5d, 0x0d, 0xaa, 0x51, 0xe0, 0x7a, 0x5b, 0x99, 0x33, 0x5d, 0xca, 0x2c, 0xc5, 0x3c, 0x99, + 0x05, 0x3d, 0x0f, 0x03, 0x2c, 0x82, 0x9d, 0x1c, 0x84, 0x87, 0xd5, 0x15, 0x8b, 0x41, 0xef, 0xed, + 0x97, 0x4b, 0xd7, 0xf0, 0x0a, 0xff, 0x83, 0x05, 0x29, 0xba, 0x06, 0xc3, 0xdb, 0x51, 0xd4, 0xba, + 0x4c, 0x9c, 0x3a, 0x09, 0xe4, 0x2e, 0x7b, 0x26, 0x6b, 0x97, 0xa5, 0x9d, 0xc0, 0xc9, 0xe2, 0x8d, + 0x29, 0x86, 0x85, 0x58, 0xe7, 0x63, 0x57, 0x01, 0x62, 0xdc, 0x21, 0x19, 0x6e, 0xec, 0x0d, 
0x28, + 0xd1, 0xcf, 0x9d, 0x6b, 0xb8, 0x4e, 0x67, 0xd3, 0xf8, 0xd3, 0x50, 0x92, 0x86, 0xef, 0x50, 0x84, + 0xbb, 0x60, 0x27, 0x92, 0xb4, 0x8b, 0x87, 0x38, 0xc6, 0xdb, 0x8f, 0x81, 0xf0, 0x2d, 0xed, 0xc4, + 0xd2, 0xde, 0x84, 0x63, 0xcc, 0x49, 0xd6, 0x89, 0xb6, 0x8d, 0x39, 0xda, 0x7d, 0x32, 0x3c, 0x23, + 0xee, 0x75, 0xfc, 0xcb, 0xa6, 0xb5, 0xc7, 0xc9, 0x23, 0x92, 0x63, 0x7c, 0xc7, 0xb3, 0xff, 0xbc, + 0x0f, 0x1e, 0x5e, 0xa9, 0xe6, 0xc7, 0x69, 0x7a, 0x09, 0x46, 0xb8, 0xb8, 0x48, 0xa7, 0x86, 0xd3, + 0x10, 0xf5, 0x2a, 0x0d, 0xe8, 0x86, 0x86, 0xc3, 0x06, 0x25, 0x3a, 0x0d, 0x45, 0xf7, 0x6d, 0x2f, + 0xf9, 0x74, 0x6f, 0xe5, 0x8d, 0x75, 0x4c, 0xe1, 0x14, 0x4d, 0x25, 0x4f, 0xbe, 0xa5, 0x2b, 0xb4, + 0x92, 0x3e, 0x5f, 0x83, 0x31, 0x37, 0xac, 0x85, 0xee, 0x8a, 0x47, 0xd7, 0xa9, 0xb6, 0xd2, 0x95, + 0xce, 0x81, 0x36, 0x5a, 0x61, 0x71, 0x82, 0x5a, 0x3b, 0x5f, 0xfa, 0x7b, 0x96, 0x5e, 0xbb, 0x46, + 0x89, 0xa0, 0xdb, 0x7f, 0x8b, 0x7d, 0x5d, 0xc8, 0x54, 0xf0, 0x62, 0xfb, 0xe7, 0x1f, 0x1c, 0x62, + 0x89, 0xa3, 0x17, 0xba, 0xda, 0xb6, 0xd3, 0x9a, 0x6b, 0x47, 0xdb, 0x8b, 0x6e, 0x58, 0xf3, 0x77, + 0x49, 0xb0, 0xc7, 0xee, 0xe2, 0x43, 0xf1, 0x85, 0x4e, 0x21, 0x16, 0x2e, 0xcf, 0x55, 0x28, 0x25, + 0x4e, 0x97, 0x41, 0x73, 0x30, 0x2e, 0x81, 0x55, 0x12, 0xb2, 0x23, 0x60, 0x98, 0xb1, 0x51, 0x8f, + 0xe9, 0x04, 0x58, 0x31, 0x49, 0xd2, 0x9b, 0x02, 0x2e, 0x1c, 0x86, 0x80, 0xfb, 0x41, 0x18, 0x75, + 0x3d, 0x37, 0x72, 0x9d, 0xc8, 0xe7, 0xf6, 0x23, 0x7e, 0xed, 0x66, 0x0a, 0xe6, 0x15, 0x1d, 0x81, + 0x4d, 0x3a, 0xfb, 0xdf, 0xf7, 0xc1, 0x24, 0x1b, 0xb6, 0xf7, 0x66, 0xd8, 0x77, 0xd2, 0x0c, 0xbb, + 0x96, 0x9e, 0x61, 0x87, 0x21, 0xb9, 0xdf, 0xf7, 0x34, 0xfb, 0x34, 0x94, 0xd4, 0xfb, 0x41, 0xf9, + 0x80, 0xd8, 0xca, 0x79, 0x40, 0xdc, 0xfd, 0xf4, 0x96, 0x2e, 0x69, 0xc5, 0x4c, 0x97, 0xb4, 0xaf, + 0x58, 0x10, 0x1b, 0x16, 0xd0, 0x1b, 0x50, 0x6a, 0xf9, 0xcc, 0xc3, 0x35, 0x90, 0x6e, 0xe3, 0x8f, + 0x75, 0xb4, 0x4c, 0xf0, 0x50, 0x75, 0x01, 0xef, 0x85, 0x8a, 0x2c, 0x8a, 0x63, 0x2e, 0xe8, 0x0a, + 0x0c, 0xb6, 0x02, 0x52, 0x8d, 0x58, 0x1c, 0xa5, 0xde, 0x19, 0xf2, 0x59, 0xc3, 0x0b, 0x62, 0xc9, + 0xc1, 0xfe, 0x85, 0x02, 0x4c, 0x24, 0x49, 0xd1, 0xab, 0xd0, 0x47, 0x6e, 0x93, 0x9a, 0x68, 0x6f, + 0xe6, 0x51, 0x1c, 0xab, 0x26, 0x78, 0x07, 0xd0, 0xff, 0x98, 0x95, 0x42, 0x97, 0x61, 0x90, 0x9e, + 0xc3, 0x97, 0x54, 0xcc, 0xc0, 0x47, 0xf2, 0xce, 0x72, 0x25, 0xd0, 0xf0, 0xc6, 0x09, 0x10, 0x96, + 0xc5, 0x99, 0x1f, 0x58, 0xad, 0x55, 0xa5, 0x57, 0x9c, 0xa8, 0xd3, 0x4d, 0x7c, 0x63, 0xa1, 0xc2, + 0x89, 0x04, 0x37, 0xee, 0x07, 0x26, 0x81, 0x38, 0x66, 0x82, 0x3e, 0x02, 0xfd, 0x61, 0x83, 0x90, + 0x96, 0x30, 0xf4, 0x67, 0x2a, 0x17, 0xab, 0x94, 0x40, 0x70, 0x62, 0xca, 0x08, 0x06, 0xc0, 0xbc, + 0xa0, 0xfd, 0x8b, 0x16, 0x00, 0x77, 0x9c, 0x73, 0xbc, 0x2d, 0x72, 0x04, 0xfa, 0xf8, 0x45, 0xe8, + 0x0b, 0x5b, 0xa4, 0xd6, 0xc9, 0x7d, 0x3b, 0x6e, 0x4f, 0xb5, 0x45, 0x6a, 0xf1, 0x9c, 0xa5, 0xff, + 0x30, 0x2b, 0x6d, 0x7f, 0x3f, 0xc0, 0x58, 0x4c, 0xb6, 0x12, 0x91, 0x26, 0x7a, 0xd6, 0x08, 0x5b, + 0x72, 0x32, 0x11, 0xb6, 0xa4, 0xc4, 0xa8, 0x35, 0xd5, 0xef, 0xa7, 0xa1, 0xd8, 0x74, 0x6e, 0x0b, + 0xdd, 0xde, 0xd3, 0x9d, 0x9b, 0x41, 0xf9, 0xcf, 0xae, 0x39, 0xb7, 0xf9, 0xf5, 0xf7, 0x69, 0xb9, + 0xc6, 0xd6, 0x9c, 0xdb, 0x5d, 0x5d, 0x8c, 0x69, 0x25, 0xac, 0x2e, 0xd7, 0x13, 0x3e, 0x61, 0x3d, + 0xd5, 0xe5, 0x7a, 0xc9, 0xba, 0x5c, 0xaf, 0x87, 0xba, 0x5c, 0x0f, 0xdd, 0x81, 0x41, 0xe1, 0xb2, + 0x29, 0x22, 0xc0, 0x5d, 0xe8, 0xa1, 0x3e, 0xe1, 0xf1, 0xc9, 0xeb, 0xbc, 0x20, 0xaf, 0xf7, 0x02, + 0xda, 0xb5, 0x5e, 0x59, 0x21, 0xfa, 0x7f, 0x2c, 0x18, 0x13, 0xbf, 0x31, 0x79, 0xbb, 0x4d, 0xc2, + 0x48, 0x88, 0xbf, 
0x1f, 0xe8, 0xbd, 0x0d, 0xa2, 0x20, 0x6f, 0xca, 0x07, 0xe4, 0x49, 0x65, 0x22, + 0xbb, 0xb6, 0x28, 0xd1, 0x0a, 0xf4, 0x0b, 0x16, 0x1c, 0x6b, 0x3a, 0xb7, 0x79, 0x8d, 0x1c, 0x86, + 0x9d, 0xc8, 0xf5, 0x85, 0xeb, 0xc3, 0xab, 0xbd, 0x0d, 0x7f, 0xaa, 0x38, 0x6f, 0xa4, 0xb4, 0x73, + 0x1e, 0xcb, 0x22, 0xe9, 0xda, 0xd4, 0xcc, 0x76, 0xcd, 0x6c, 0xc2, 0x90, 0x9c, 0x6f, 0x0f, 0xd2, + 0x3f, 0x9c, 0xd5, 0x23, 0xe6, 0xda, 0x03, 0xad, 0xe7, 0xd3, 0x30, 0xa2, 0xcf, 0xb1, 0x07, 0x5a, + 0xd7, 0xdb, 0x30, 0x95, 0x31, 0x97, 0x1e, 0x68, 0x95, 0xb7, 0xe0, 0x64, 0xee, 0xfc, 0x78, 0xa0, + 0xfe, 0xfd, 0x5f, 0xb3, 0xf4, 0x7d, 0xf0, 0x08, 0x8c, 0x22, 0x0b, 0xa6, 0x51, 0xe4, 0x4c, 0xe7, + 0x95, 0x93, 0x63, 0x19, 0x79, 0x4b, 0x6f, 0x34, 0xdd, 0xd5, 0xd1, 0xeb, 0x30, 0xd0, 0xa0, 0x10, + 0xe9, 0xf8, 0x6b, 0x77, 0x5f, 0x91, 0xb1, 0x38, 0xca, 0xe0, 0x21, 0x16, 0x1c, 0xec, 0x5f, 0xb1, + 0xa0, 0xef, 0x08, 0x7a, 0x02, 0x9b, 0x3d, 0xf1, 0x6c, 0x2e, 0x6b, 0x11, 0x0c, 0x7f, 0x16, 0x3b, + 0xb7, 0x96, 0x6e, 0x47, 0xc4, 0x0b, 0xd9, 0x99, 0x9e, 0xd9, 0x31, 0xfb, 0x16, 0x4c, 0xad, 0xfa, + 0x4e, 0x7d, 0xde, 0x69, 0x38, 0x5e, 0x8d, 0x04, 0x2b, 0xde, 0xd6, 0x81, 0xbc, 0xd6, 0x0b, 0x5d, + 0xbd, 0xd6, 0x5f, 0x82, 0x01, 0xb7, 0xa5, 0x05, 0xf7, 0x3e, 0x4b, 0x3b, 0x70, 0xa5, 0x22, 0xe2, + 0x7a, 0x23, 0xa3, 0x72, 0x06, 0xc5, 0x82, 0x9e, 0x8e, 0x3c, 0x77, 0x17, 0xeb, 0xcb, 0x1f, 0x79, + 0x2a, 0xc5, 0x27, 0x43, 0x40, 0x19, 0x8e, 0xcd, 0xdb, 0x60, 0x54, 0x21, 0x5e, 0x7d, 0x61, 0x18, + 0x74, 0xf9, 0x97, 0x8a, 0xe1, 0x7f, 0x22, 0x5b, 0xba, 0x4e, 0x75, 0x8c, 0xf6, 0x9e, 0x89, 0x03, + 0xb0, 0x64, 0x64, 0xbf, 0x04, 0x99, 0x21, 0x3b, 0xba, 0x6b, 0x4e, 0xec, 0x8f, 0xc1, 0x24, 0x2b, + 0x79, 0x40, 0xad, 0x84, 0x9d, 0xd0, 0xf7, 0x66, 0xc4, 0x69, 0xb5, 0xff, 0x8d, 0x05, 0x68, 0xcd, + 0xaf, 0xbb, 0x9b, 0x7b, 0x82, 0x39, 0xff, 0xfe, 0xb7, 0xa1, 0xcc, 0xaf, 0x7d, 0xc9, 0x58, 0xa6, + 0x0b, 0x0d, 0x27, 0x0c, 0x35, 0x5d, 0xf3, 0x13, 0xa2, 0xde, 0xf2, 0x46, 0x67, 0x72, 0xdc, 0x8d, + 0x1f, 0x7a, 0x23, 0x11, 0xa8, 0xed, 0x43, 0xa9, 0x40, 0x6d, 0x4f, 0x64, 0x7a, 0x7c, 0xa4, 0x5b, + 0x2f, 0x03, 0xb8, 0xd9, 0x5f, 0xb0, 0x60, 0x7c, 0x3d, 0x11, 0x9b, 0xf3, 0x1c, 0x33, 0x7f, 0x67, + 0xd8, 0x50, 0xaa, 0x0c, 0x8a, 0x05, 0xf6, 0xd0, 0x75, 0x8c, 0x7f, 0x63, 0x41, 0x1c, 0x22, 0xe8, + 0x08, 0xa4, 0xda, 0x05, 0x43, 0xaa, 0xcd, 0xbc, 0x21, 0xa8, 0xe6, 0xe4, 0x09, 0xb5, 0xe8, 0x8a, + 0x1a, 0x93, 0x0e, 0x97, 0x83, 0x98, 0x0d, 0x5f, 0x67, 0x63, 0xe6, 0xc0, 0xa9, 0xd1, 0xf8, 0xc3, + 0x02, 0x20, 0x45, 0xdb, 0x73, 0x70, 0xbf, 0x74, 0x89, 0xc3, 0x09, 0xee, 0xb7, 0x0b, 0x88, 0x39, + 0x70, 0x04, 0x8e, 0x17, 0x72, 0xb6, 0xae, 0xd0, 0xaa, 0x1e, 0xcc, 0x3b, 0x64, 0x46, 0xbe, 0xf6, + 0x5b, 0x4d, 0x71, 0xc3, 0x19, 0x35, 0x68, 0x8e, 0x39, 0xfd, 0xbd, 0x3a, 0xe6, 0x0c, 0x74, 0x79, + 0xb6, 0xfa, 0x55, 0x0b, 0x46, 0x55, 0x37, 0xbd, 0x4b, 0x1e, 0x37, 0xa8, 0xf6, 0xe4, 0x9c, 0x2b, + 0x15, 0xad, 0xc9, 0xec, 0xbc, 0xfd, 0x2e, 0xf6, 0xfc, 0xd8, 0x69, 0xb8, 0x77, 0x88, 0x8a, 0x9a, + 0x5b, 0x16, 0xcf, 0x89, 0x05, 0xf4, 0xde, 0x7e, 0x79, 0x54, 0xfd, 0xe3, 0x51, 0x2f, 0xe3, 0x22, + 0xf6, 0x4f, 0xd1, 0xc5, 0x6e, 0x4e, 0x45, 0xf4, 0x22, 0xf4, 0xb7, 0xb6, 0x9d, 0x90, 0x24, 0x1e, + 0x81, 0xf5, 0x57, 0x28, 0xf0, 0xde, 0x7e, 0x79, 0x4c, 0x15, 0x60, 0x10, 0xcc, 0xa9, 0x7b, 0x0f, + 0x99, 0x98, 0x9e, 0x9c, 0x5d, 0x43, 0x26, 0xfe, 0x95, 0x05, 0x7d, 0xeb, 0xf4, 0xf4, 0x7a, 0xf0, + 0x5b, 0xc0, 0x6b, 0xc6, 0x16, 0x70, 0x2a, 0x2f, 0x61, 0x4b, 0xee, 0xea, 0x5f, 0x4e, 0xac, 0xfe, + 0x33, 0xb9, 0x1c, 0x3a, 0x2f, 0xfc, 0x26, 0x0c, 0xb3, 0x34, 0x30, 0xe2, 0xc1, 0xdb, 0xf3, 0xc6, + 0x82, 0x2f, 0x27, 0x16, 0xfc, 0xb8, 0x46, 
0xaa, 0xad, 0xf4, 0x27, 0x61, 0x50, 0xbc, 0xa0, 0x4a, + 0xbe, 0xe2, 0x16, 0xb4, 0x58, 0xe2, 0xed, 0x1f, 0x2f, 0x82, 0x91, 0x76, 0x06, 0xfd, 0x9a, 0x05, + 0xb3, 0x01, 0xf7, 0xac, 0xae, 0x2f, 0xb6, 0x03, 0xd7, 0xdb, 0xaa, 0xd6, 0xb6, 0x49, 0xbd, 0xdd, + 0x70, 0xbd, 0xad, 0x95, 0x2d, 0xcf, 0x57, 0xe0, 0xa5, 0xdb, 0xa4, 0xd6, 0x66, 0x56, 0xcf, 0x2e, + 0x39, 0x6e, 0xd4, 0x0b, 0x85, 0xe7, 0xee, 0xee, 0x97, 0x67, 0xf1, 0x81, 0x78, 0xe3, 0x03, 0xb6, + 0x05, 0xfd, 0x9e, 0x05, 0x17, 0x78, 0x36, 0x96, 0xde, 0xdb, 0xdf, 0x41, 0x89, 0x50, 0x91, 0xac, + 0x62, 0x26, 0x1b, 0x24, 0x68, 0xce, 0x7f, 0x50, 0x74, 0xe8, 0x85, 0xca, 0xc1, 0xea, 0xc2, 0x07, + 0x6d, 0x9c, 0xfd, 0x8f, 0x8a, 0x30, 0x2a, 0x42, 0xeb, 0x89, 0x33, 0xe0, 0x45, 0x63, 0x4a, 0x3c, + 0x92, 0x98, 0x12, 0x93, 0x06, 0xf1, 0xe1, 0x6c, 0xff, 0x21, 0x4c, 0xd2, 0xcd, 0xf9, 0x32, 0x71, + 0x82, 0xe8, 0x26, 0x71, 0xb8, 0xbf, 0x5d, 0xf1, 0xc0, 0xbb, 0xbf, 0x52, 0xfc, 0xae, 0x26, 0x99, + 0xe1, 0x34, 0xff, 0xef, 0xa4, 0x33, 0xc7, 0x83, 0x89, 0x54, 0x74, 0xc4, 0x37, 0xa1, 0xa4, 0x9e, + 0xff, 0x88, 0x4d, 0xa7, 0x73, 0x90, 0xd1, 0x24, 0x07, 0xae, 0x57, 0x8c, 0x9f, 0x9e, 0xc5, 0xec, + 0xec, 0xbf, 0x57, 0x30, 0x2a, 0xe4, 0x83, 0xb8, 0x0e, 0x43, 0x4e, 0xc8, 0x02, 0x1f, 0xd7, 0x3b, + 0xa9, 0x7e, 0x53, 0xd5, 0xb0, 0x27, 0x58, 0x73, 0xa2, 0x24, 0x56, 0x3c, 0xd0, 0x65, 0xee, 0xd5, + 0xb8, 0x4b, 0x3a, 0xe9, 0x7d, 0x53, 0xdc, 0x40, 0xfa, 0x3d, 0xee, 0x12, 0x2c, 0xca, 0xa3, 0x4f, + 0x70, 0xb7, 0xd3, 0x2b, 0x9e, 0x7f, 0xcb, 0xbb, 0xe4, 0xfb, 0x32, 0x8c, 0x4a, 0x6f, 0x0c, 0x27, + 0xa5, 0xb3, 0xa9, 0x2a, 0x8e, 0x4d, 0x6e, 0xbd, 0x85, 0x1b, 0xfe, 0x0c, 0xb0, 0xec, 0x13, 0xe6, + 0x6b, 0xfb, 0x10, 0x11, 0x18, 0x17, 0x71, 0x1b, 0x25, 0x4c, 0xf4, 0x5d, 0xe6, 0x0d, 0xd7, 0x2c, + 0x1d, 0x5b, 0x28, 0xae, 0x98, 0x2c, 0x70, 0x92, 0xa7, 0xfd, 0x33, 0x16, 0xb0, 0x97, 0xc7, 0x47, + 0x20, 0x8f, 0x7c, 0xd8, 0x94, 0x47, 0xa6, 0xf3, 0x3a, 0x39, 0x47, 0x14, 0x79, 0x81, 0xcf, 0xac, + 0x4a, 0xe0, 0xdf, 0xde, 0x13, 0xbe, 0x42, 0xdd, 0x2f, 0x57, 0xf6, 0x7f, 0xb7, 0xf8, 0x26, 0x16, + 0xc7, 0x69, 0xf8, 0x2c, 0x0c, 0xd5, 0x9c, 0x96, 0x53, 0xe3, 0x39, 0xd2, 0x72, 0x15, 0x9d, 0x46, + 0xa1, 0xd9, 0x05, 0x51, 0x82, 0x2b, 0xee, 0x64, 0xfc, 0xcf, 0x21, 0x09, 0xee, 0xaa, 0xac, 0x53, + 0x55, 0xce, 0xec, 0xc0, 0xa8, 0xc1, 0xec, 0x81, 0x6a, 0x79, 0x3e, 0xcb, 0x8f, 0x58, 0x15, 0xaf, + 0xb6, 0x09, 0x93, 0x9e, 0xf6, 0x9f, 0x1e, 0x28, 0xf2, 0xe6, 0xfc, 0x58, 0xb7, 0x43, 0x94, 0x9d, + 0x3e, 0xda, 0xa3, 0xe6, 0x04, 0x1b, 0x9c, 0xe6, 0x6c, 0xff, 0x84, 0x05, 0x0f, 0xe9, 0x84, 0xda, + 0xbb, 0xa9, 0x6e, 0xd6, 0xa7, 0x45, 0x18, 0xf2, 0x5b, 0x24, 0x70, 0x22, 0x3f, 0x10, 0xa7, 0xc6, + 0x79, 0xd9, 0xe9, 0x57, 0x05, 0xfc, 0x9e, 0xc8, 0xf8, 0x21, 0xb9, 0x4b, 0x38, 0x56, 0x25, 0xe9, + 0xd5, 0x9a, 0x75, 0x46, 0x28, 0x5e, 0xc8, 0xb1, 0x3d, 0x80, 0x39, 0x32, 0x84, 0x58, 0x60, 0xec, + 0x3f, 0xb7, 0xf8, 0xc4, 0xd2, 0x9b, 0x8e, 0xde, 0x86, 0x89, 0xa6, 0x13, 0xd5, 0xb6, 0x97, 0x6e, + 0xb7, 0x02, 0x6e, 0xcb, 0x93, 0xfd, 0xf4, 0x74, 0xb7, 0x7e, 0xd2, 0x3e, 0x32, 0xf6, 0xa4, 0x5d, + 0x4b, 0x30, 0xc3, 0x29, 0xf6, 0xe8, 0x26, 0x0c, 0x33, 0x18, 0x7b, 0xfc, 0x19, 0x76, 0x12, 0x0d, + 0xf2, 0x6a, 0x53, 0xbe, 0x20, 0x6b, 0x31, 0x1f, 0xac, 0x33, 0xb5, 0xbf, 0x52, 0xe4, 0xab, 0x9d, + 0x89, 0xf2, 0x4f, 0xc2, 0x60, 0xcb, 0xaf, 0x2f, 0xac, 0x2c, 0x62, 0x31, 0x0a, 0xea, 0x18, 0xa9, + 0x70, 0x30, 0x96, 0x78, 0x74, 0x1e, 0x86, 0xc4, 0x4f, 0x69, 0x7b, 0x65, 0x7b, 0xb3, 0xa0, 0x0b, + 0xb1, 0xc2, 0xa2, 0xe7, 0x00, 0x5a, 0x81, 0xbf, 0xeb, 0xd6, 0x59, 0x30, 0x98, 0xa2, 0xe9, 0xc6, + 0x55, 0x51, 0x18, 0xac, 0x51, 0xa1, 0x57, 0x60, 0xb4, 0xed, 0x85, 
0x5c, 0x1c, 0xd1, 0x42, 0x6e, + 0x2b, 0x07, 0xa3, 0x6b, 0x3a, 0x12, 0x9b, 0xb4, 0x68, 0x0e, 0x06, 0x22, 0x87, 0xb9, 0x25, 0xf5, + 0xe7, 0x7b, 0x5b, 0x6f, 0x50, 0x0a, 0x3d, 0x1d, 0x17, 0x2d, 0x80, 0x45, 0x41, 0xf4, 0xa6, 0x7c, + 0x87, 0xcd, 0x37, 0x76, 0xf1, 0xcc, 0xa1, 0xb7, 0x43, 0x40, 0x7b, 0x85, 0x2d, 0x9e, 0x4f, 0x18, + 0xbc, 0xd0, 0xcb, 0x00, 0xe4, 0x76, 0x44, 0x02, 0xcf, 0x69, 0x28, 0x67, 0x42, 0x25, 0x17, 0x2c, + 0xfa, 0xeb, 0x7e, 0x74, 0x2d, 0x24, 0x4b, 0x8a, 0x02, 0x6b, 0xd4, 0xf6, 0xef, 0x95, 0x00, 0x62, + 0xb9, 0x1d, 0xdd, 0x49, 0x6d, 0x5c, 0xcf, 0x74, 0x96, 0xf4, 0x0f, 0x6f, 0xd7, 0x42, 0x9f, 0xb7, + 0x60, 0x58, 0xc4, 0xbc, 0x61, 0x23, 0x54, 0xe8, 0xbc, 0x71, 0x9a, 0xa1, 0x77, 0x68, 0x09, 0xde, + 0x84, 0xe7, 0xe5, 0x0c, 0xd5, 0x30, 0x5d, 0x5b, 0xa1, 0x57, 0x8c, 0xde, 0x2f, 0xaf, 0x8a, 0x45, + 0xa3, 0x2b, 0xd5, 0x55, 0xb1, 0xc4, 0xce, 0x08, 0xfd, 0x96, 0x78, 0xcd, 0xb8, 0x25, 0xf6, 0xe5, + 0x3f, 0x34, 0x35, 0xc4, 0xd7, 0x6e, 0x17, 0x44, 0x54, 0xd1, 0x83, 0x4e, 0xf4, 0xe7, 0xbf, 0x8e, + 0xd4, 0xee, 0x49, 0x5d, 0x02, 0x4e, 0x7c, 0x1a, 0xc6, 0xeb, 0xa6, 0x10, 0x20, 0x66, 0xe2, 0x13, + 0x79, 0x7c, 0x13, 0x32, 0x43, 0x7c, 0xec, 0x27, 0x10, 0x38, 0xc9, 0x18, 0x55, 0x78, 0x0c, 0x92, + 0x15, 0x6f, 0xd3, 0x17, 0x4f, 0x6d, 0xec, 0xdc, 0xb1, 0xdc, 0x0b, 0x23, 0xd2, 0xa4, 0x94, 0xf1, + 0xe9, 0xbe, 0x2e, 0xca, 0x62, 0xc5, 0x05, 0xbd, 0x0e, 0x03, 0xec, 0x79, 0x5c, 0x38, 0x3d, 0x94, + 0xaf, 0x88, 0x37, 0x83, 0x31, 0xc6, 0x0b, 0x92, 0xfd, 0x0d, 0xb1, 0xe0, 0x80, 0x2e, 0xcb, 0xc7, + 0xa7, 0xe1, 0x8a, 0x77, 0x2d, 0x24, 0xec, 0xf1, 0x69, 0x69, 0xfe, 0xb1, 0xf8, 0x5d, 0x29, 0x87, + 0x67, 0x26, 0xed, 0x34, 0x4a, 0x52, 0x29, 0x4a, 0xfc, 0x97, 0xb9, 0x40, 0x45, 0xe8, 0xa8, 0xcc, + 0xe6, 0x99, 0xf9, 0x42, 0xe3, 0xee, 0xbc, 0x6e, 0xb2, 0xc0, 0x49, 0x9e, 0x54, 0x22, 0xe5, 0xab, + 0x5e, 0x3c, 0xd6, 0xe9, 0xb6, 0x77, 0xf0, 0x8b, 0x38, 0x3b, 0x8d, 0x38, 0x04, 0x8b, 0xf2, 0x47, + 0x2a, 0x1e, 0xcc, 0x78, 0x30, 0x91, 0x5c, 0xa2, 0x0f, 0x54, 0x1c, 0xf9, 0xd3, 0x3e, 0x18, 0x33, + 0xa7, 0x14, 0xba, 0x00, 0x25, 0xc1, 0x44, 0xe5, 0xd3, 0x51, 0xab, 0x64, 0x4d, 0x22, 0x70, 0x4c, + 0xc3, 0xd2, 0x28, 0xb1, 0xe2, 0x9a, 0x77, 0x76, 0x9c, 0x46, 0x49, 0x61, 0xb0, 0x46, 0x45, 0x2f, + 0x56, 0x37, 0x7d, 0x3f, 0x52, 0x07, 0x92, 0x9a, 0x77, 0xf3, 0x0c, 0x8a, 0x05, 0x96, 0x1e, 0x44, + 0x3b, 0x24, 0xf0, 0x48, 0xc3, 0x0c, 0xcf, 0xae, 0x0e, 0xa2, 0x2b, 0x3a, 0x12, 0x9b, 0xb4, 0xf4, + 0x38, 0xf5, 0x43, 0x36, 0x91, 0xc5, 0xf5, 0x2d, 0xf6, 0x76, 0xaf, 0xf2, 0x77, 0xfb, 0x12, 0x8f, + 0x3e, 0x06, 0x0f, 0xa9, 0x50, 0x68, 0x98, 0x1b, 0x79, 0x64, 0x8d, 0x03, 0x86, 0xb6, 0xe5, 0xa1, + 0x85, 0x6c, 0x32, 0x9c, 0x57, 0x1e, 0xbd, 0x06, 0x63, 0x42, 0xc4, 0x97, 0x1c, 0x07, 0x4d, 0xd7, + 0xad, 0x2b, 0x06, 0x16, 0x27, 0xa8, 0x65, 0x80, 0x79, 0x26, 0x65, 0x4b, 0x0e, 0x43, 0xe9, 0x00, + 0xf3, 0x3a, 0x1e, 0xa7, 0x4a, 0xa0, 0x39, 0x18, 0xe7, 0x32, 0x98, 0xeb, 0x6d, 0xf1, 0x31, 0x11, + 0x6f, 0xe9, 0xd4, 0x92, 0xba, 0x6a, 0xa2, 0x71, 0x92, 0x1e, 0xbd, 0x04, 0x23, 0x4e, 0x50, 0xdb, + 0x76, 0x23, 0x52, 0x8b, 0xda, 0x01, 0x7f, 0x64, 0xa7, 0xf9, 0xbe, 0xcd, 0x69, 0x38, 0x6c, 0x50, + 0xda, 0x77, 0x60, 0x2a, 0x23, 0xa0, 0x07, 0x9d, 0x38, 0x4e, 0xcb, 0x95, 0xdf, 0x94, 0x70, 0x30, + 0x9f, 0xab, 0xac, 0xc8, 0xaf, 0xd1, 0xa8, 0xe8, 0xec, 0x64, 0x81, 0x3f, 0xb4, 0xd4, 0xbf, 0x6a, + 0x76, 0x2e, 0x4b, 0x04, 0x8e, 0x69, 0xec, 0xff, 0x54, 0x80, 0xf1, 0x0c, 0xc3, 0x11, 0x4b, 0x3f, + 0x9b, 0xb8, 0xa4, 0xc4, 0xd9, 0x66, 0xcd, 0x7c, 0x05, 0x85, 0x03, 0xe4, 0x2b, 0x28, 0x76, 0xcb, + 0x57, 0xd0, 0xf7, 0x4e, 0xf2, 0x15, 0x98, 0x3d, 0xd6, 0xdf, 0x53, 0x8f, 0x65, 0xe4, 0x38, 
0x18, + 0x38, 0x60, 0x8e, 0x03, 0xa3, 0xd3, 0x07, 0x7b, 0xe8, 0xf4, 0x1f, 0x29, 0xc0, 0x44, 0xd2, 0xe6, + 0x74, 0x04, 0x7a, 0xdb, 0xd7, 0x0d, 0xbd, 0xed, 0xf9, 0x5e, 0xde, 0x3e, 0xe7, 0xea, 0x70, 0x71, + 0x42, 0x87, 0xfb, 0x54, 0x4f, 0xdc, 0x3a, 0xeb, 0x73, 0x7f, 0xb2, 0x00, 0xc7, 0x33, 0x4d, 0x71, + 0x47, 0xd0, 0x37, 0x57, 0x8d, 0xbe, 0x79, 0xb6, 0xe7, 0x77, 0xe1, 0xb9, 0x1d, 0x74, 0x23, 0xd1, + 0x41, 0x17, 0x7a, 0x67, 0xd9, 0xb9, 0x97, 0xbe, 0x51, 0x84, 0x33, 0x99, 0xe5, 0x62, 0xb5, 0xe7, + 0xb2, 0xa1, 0xf6, 0x7c, 0x2e, 0xa1, 0xf6, 0xb4, 0x3b, 0x97, 0x3e, 0x1c, 0x3d, 0xa8, 0x78, 0x1f, + 0xcd, 0xa2, 0x3c, 0xdc, 0xa7, 0x0e, 0xd4, 0x78, 0x1f, 0xad, 0x18, 0x61, 0x93, 0xef, 0x77, 0x92, + 0xee, 0xf3, 0x77, 0x2c, 0x38, 0x99, 0x39, 0x36, 0x47, 0xa0, 0xeb, 0x5a, 0x37, 0x75, 0x5d, 0x4f, + 0xf6, 0x3c, 0x5b, 0x73, 0x94, 0x5f, 0x5f, 0x18, 0xc8, 0xf9, 0x16, 0x76, 0x93, 0xbf, 0x0a, 0xc3, + 0x4e, 0xad, 0x46, 0xc2, 0x70, 0xcd, 0xaf, 0xab, 0xd0, 0xe6, 0xcf, 0xb2, 0x7b, 0x56, 0x0c, 0xbe, + 0xb7, 0x5f, 0x9e, 0x49, 0xb2, 0x88, 0xd1, 0x58, 0xe7, 0x80, 0x3e, 0x01, 0x43, 0xa1, 0xcc, 0x4a, + 0xd7, 0x77, 0xff, 0x59, 0xe9, 0x98, 0x92, 0x40, 0x69, 0x2a, 0x14, 0x4b, 0xf4, 0xbf, 0xe9, 0xf1, + 0x76, 0xd2, 0x52, 0x65, 0x22, 0xfa, 0xcb, 0x7d, 0x44, 0xdd, 0x79, 0x0e, 0x60, 0x57, 0x5d, 0x09, + 0x92, 0x5a, 0x08, 0xed, 0xb2, 0xa0, 0x51, 0xa1, 0x8f, 0xc0, 0x44, 0xc8, 0x43, 0x4d, 0xc6, 0xce, + 0x13, 0x7c, 0x2e, 0xb2, 0x68, 0x5d, 0xd5, 0x04, 0x0e, 0xa7, 0xa8, 0xd1, 0xb2, 0xac, 0x95, 0xb9, + 0xc9, 0xf0, 0xe9, 0x79, 0x2e, 0xae, 0x51, 0xb8, 0xca, 0x1c, 0x4b, 0x0e, 0x02, 0xeb, 0x7e, 0xad, + 0x24, 0xfa, 0x04, 0x00, 0x9d, 0x44, 0x42, 0x1b, 0x31, 0x98, 0xbf, 0x85, 0xd2, 0xbd, 0xa5, 0x9e, + 0xe9, 0x3b, 0xce, 0x1e, 0x36, 0x2f, 0x2a, 0x26, 0x58, 0x63, 0x88, 0x1c, 0x18, 0x8d, 0xff, 0xc5, + 0x19, 0xa2, 0xcf, 0xe7, 0xd6, 0x90, 0x64, 0xce, 0x14, 0xdf, 0x8b, 0x3a, 0x0b, 0x6c, 0x72, 0x44, + 0x1f, 0x87, 0x93, 0xbb, 0xb9, 0x1e, 0x29, 0xa5, 0x38, 0xe9, 0x63, 0xbe, 0x1f, 0x4a, 0x7e, 0x79, + 0xfb, 0x77, 0x01, 0x1e, 0xee, 0xb0, 0xd3, 0xa3, 0x39, 0xd3, 0x9a, 0xfc, 0x74, 0x52, 0x45, 0x30, + 0x93, 0x59, 0xd8, 0xd0, 0x19, 0x24, 0x16, 0x54, 0xe1, 0x1d, 0x2f, 0xa8, 0x1f, 0xb2, 0x34, 0xe5, + 0x0d, 0x77, 0xe7, 0xfd, 0xf0, 0x01, 0x4f, 0xb0, 0x43, 0xd4, 0xe6, 0x6c, 0x66, 0xa8, 0x44, 0x9e, + 0xeb, 0xb9, 0x39, 0xbd, 0xeb, 0x48, 0xbe, 0x96, 0x1d, 0xbc, 0x99, 0x6b, 0x4b, 0x2e, 0x1d, 0xf4, + 0xfb, 0x8f, 0x2a, 0x90, 0xf3, 0x1f, 0x5a, 0x70, 0x32, 0x05, 0xe6, 0x6d, 0x20, 0xa1, 0x88, 0x2f, + 0xb6, 0xfe, 0x8e, 0x1b, 0x2f, 0x19, 0xf2, 0x6f, 0xb8, 0x2c, 0xbe, 0xe1, 0x64, 0x2e, 0x5d, 0xb2, + 0xe9, 0x5f, 0xfc, 0x93, 0xf2, 0x14, 0xab, 0xc0, 0x24, 0xc4, 0xf9, 0x4d, 0x47, 0x2d, 0x38, 0x5b, + 0x6b, 0x07, 0x41, 0x3c, 0x59, 0x33, 0x16, 0x27, 0xbf, 0xeb, 0x3d, 0x76, 0x77, 0xbf, 0x7c, 0x76, + 0xa1, 0x0b, 0x2d, 0xee, 0xca, 0x0d, 0x79, 0x80, 0x9a, 0x29, 0xbf, 0x2f, 0x91, 0x18, 0x3e, 0xd3, + 0x53, 0x23, 0xed, 0x25, 0xc6, 0x1f, 0xb0, 0x66, 0x78, 0x8f, 0x65, 0x70, 0x3e, 0x5a, 0xed, 0xc9, + 0xb7, 0x26, 0x32, 0xf7, 0xcc, 0x2a, 0x9c, 0xe9, 0x3c, 0x99, 0x0e, 0xf4, 0x78, 0xfe, 0x0f, 0x2c, + 0x38, 0xdd, 0x31, 0x42, 0xd3, 0xb7, 0xe1, 0x65, 0xc1, 0xfe, 0x9c, 0x05, 0x8f, 0x64, 0x96, 0x30, + 0x5c, 0x0c, 0x2f, 0x40, 0xa9, 0x96, 0x48, 0x6b, 0x1c, 0xc7, 0x2a, 0x51, 0x29, 0x8d, 0x63, 0x1a, + 0xc3, 0x93, 0xb0, 0xd0, 0xd5, 0x93, 0xf0, 0x37, 0x2d, 0x48, 0x1d, 0xf5, 0x47, 0x20, 0x79, 0xae, + 0x98, 0x92, 0xe7, 0x63, 0xbd, 0xf4, 0x66, 0x8e, 0xd0, 0xf9, 0x97, 0xe3, 0x70, 0x22, 0xe7, 0xed, + 0xeb, 0x2e, 0x4c, 0x6e, 0xd5, 0x88, 0x19, 0xec, 0xa0, 0x53, 0x10, 0xb0, 0x8e, 0x91, 0x11, 0x78, + 0x36, 0xe9, 0x14, 
0x09, 0x4e, 0x57, 0x81, 0x3e, 0x67, 0xc1, 0x31, 0xe7, 0x56, 0xb8, 0x44, 0x6f, + 0x10, 0x6e, 0x6d, 0xbe, 0xe1, 0xd7, 0x76, 0xa8, 0x60, 0x26, 0x97, 0xd5, 0x0b, 0x99, 0x5a, 0xdd, + 0x1b, 0xd5, 0x14, 0xbd, 0x51, 0xfd, 0xf4, 0xdd, 0xfd, 0xf2, 0xb1, 0x2c, 0x2a, 0x9c, 0x59, 0x17, + 0xc2, 0x22, 0x7b, 0x8f, 0x13, 0x6d, 0x77, 0x0a, 0xc7, 0x91, 0xf5, 0x48, 0x99, 0x8b, 0xc4, 0x12, + 0x83, 0x15, 0x1f, 0xf4, 0x29, 0x28, 0x6d, 0xc9, 0x97, 0xf7, 0x19, 0x22, 0x77, 0xdc, 0x91, 0x9d, + 0xe3, 0x11, 0x70, 0xd7, 0x0c, 0x45, 0x84, 0x63, 0xa6, 0xe8, 0x35, 0x28, 0x7a, 0x9b, 0x61, 0xa7, + 0xf4, 0xfb, 0x09, 0x1f, 0x5c, 0x1e, 0xf4, 0x66, 0x7d, 0xb9, 0x8a, 0x69, 0x41, 0x74, 0x19, 0x8a, + 0xc1, 0xcd, 0xba, 0x30, 0x49, 0x64, 0x2e, 0x52, 0x3c, 0xbf, 0x98, 0xd3, 0x2a, 0xc6, 0x09, 0xcf, + 0x2f, 0x62, 0xca, 0x02, 0x55, 0xa0, 0x9f, 0x3d, 0x18, 0x15, 0xa2, 0x6d, 0xe6, 0x55, 0xbe, 0xc3, + 0xc3, 0x6b, 0xfe, 0x18, 0x8d, 0x11, 0x60, 0xce, 0x08, 0x6d, 0xc0, 0x40, 0x8d, 0xa5, 0x6a, 0x17, + 0xb2, 0xec, 0xfb, 0x33, 0x8d, 0x0f, 0x1d, 0x72, 0xd8, 0x0b, 0x5d, 0x3c, 0xa3, 0xc0, 0x82, 0x17, + 0xe3, 0x4a, 0x5a, 0xdb, 0x9b, 0xf2, 0xc4, 0xca, 0xe6, 0x4a, 0x5a, 0xdb, 0xcb, 0xd5, 0x8e, 0x5c, + 0x19, 0x05, 0x16, 0xbc, 0xd0, 0xcb, 0x50, 0xd8, 0xac, 0x89, 0xc7, 0xa0, 0x99, 0x56, 0x08, 0x33, + 0x6e, 0xd1, 0xfc, 0xc0, 0xdd, 0xfd, 0x72, 0x61, 0x79, 0x01, 0x17, 0x36, 0x6b, 0x68, 0x1d, 0x06, + 0x37, 0x79, 0xa4, 0x13, 0x61, 0x68, 0x78, 0x22, 0x3b, 0x08, 0x4b, 0x2a, 0x18, 0x0a, 0x7f, 0x58, + 0x28, 0x10, 0x58, 0x32, 0x61, 0xc9, 0x64, 0x54, 0xc4, 0x16, 0x11, 0x30, 0x72, 0xf6, 0x60, 0x51, + 0x76, 0xf8, 0x55, 0x23, 0x8e, 0xfb, 0x82, 0x35, 0x8e, 0x74, 0x56, 0x3b, 0x77, 0xda, 0x01, 0xcb, + 0x26, 0x20, 0x22, 0x8b, 0x65, 0xce, 0xea, 0x39, 0x49, 0xd4, 0x69, 0x56, 0x2b, 0x22, 0x1c, 0x33, + 0x45, 0x3b, 0x30, 0xba, 0x1b, 0xb6, 0xb6, 0x89, 0x5c, 0xd2, 0x2c, 0xd0, 0x58, 0x8e, 0x34, 0x7b, + 0x5d, 0x10, 0xba, 0x41, 0xd4, 0x76, 0x1a, 0xa9, 0x5d, 0x88, 0x5d, 0x6b, 0xae, 0xeb, 0xcc, 0xb0, + 0xc9, 0x9b, 0x76, 0xff, 0xdb, 0x6d, 0xff, 0xe6, 0x5e, 0x44, 0x44, 0x9c, 0xc7, 0xcc, 0xee, 0x7f, + 0x83, 0x93, 0xa4, 0xbb, 0x5f, 0x20, 0xb0, 0x64, 0x82, 0xae, 0x8b, 0xee, 0x61, 0xbb, 0xe7, 0x44, + 0x7e, 0x10, 0xe9, 0x39, 0x49, 0x94, 0xd3, 0x29, 0x6c, 0xb7, 0x8c, 0x59, 0xb1, 0x5d, 0xb2, 0xb5, + 0xed, 0x47, 0xbe, 0x97, 0xd8, 0xa1, 0x27, 0xf3, 0x77, 0xc9, 0x4a, 0x06, 0x7d, 0x7a, 0x97, 0xcc, + 0xa2, 0xc2, 0x99, 0x75, 0xa1, 0x3a, 0x8c, 0xb5, 0xfc, 0x20, 0xba, 0xe5, 0x07, 0x72, 0x7e, 0xa1, + 0x0e, 0x8a, 0x52, 0x83, 0x52, 0xd4, 0xc8, 0x42, 0xa8, 0x9a, 0x18, 0x9c, 0xe0, 0x89, 0x3e, 0x0a, + 0x83, 0x61, 0xcd, 0x69, 0x90, 0x95, 0xab, 0xd3, 0x53, 0xf9, 0xc7, 0x4f, 0x95, 0x93, 0xe4, 0xcc, + 0x2e, 0x1e, 0xa8, 0x86, 0x93, 0x60, 0xc9, 0x0e, 0x2d, 0x43, 0x3f, 0x4b, 0xd2, 0xca, 0x82, 0x92, + 0xe6, 0xc4, 0xc2, 0x4e, 0x3d, 0xf7, 0xe0, 0x7b, 0x13, 0x03, 0x63, 0x5e, 0x9c, 0xae, 0x01, 0xa1, + 0x29, 0xf0, 0xc3, 0xe9, 0xe3, 0xf9, 0x6b, 0x40, 0x28, 0x18, 0xae, 0x56, 0x3b, 0xad, 0x01, 0x45, + 0x84, 0x63, 0xa6, 0x74, 0x67, 0xa6, 0xbb, 0xe9, 0x89, 0x0e, 0xae, 0x7c, 0xb9, 0x7b, 0x29, 0xdb, + 0x99, 0xe9, 0x4e, 0x4a, 0x59, 0xd8, 0xbf, 0x3e, 0x94, 0x96, 0x59, 0x98, 0x86, 0xe9, 0xff, 0xb0, + 0x52, 0xce, 0x07, 0x1f, 0xe8, 0x55, 0xe1, 0x7d, 0x88, 0x17, 0xd7, 0xcf, 0x59, 0x70, 0xa2, 0x95, + 0xf9, 0x21, 0x42, 0x00, 0xe8, 0x4d, 0x6f, 0xce, 0x3f, 0x5d, 0x05, 0xb0, 0xcd, 0xc6, 0xe3, 0x9c, + 0x9a, 0x92, 0xca, 0x81, 0xe2, 0x3b, 0x56, 0x0e, 0xac, 0xc1, 0x50, 0x8d, 0xdf, 0xe4, 0x64, 0xe0, + 0xf5, 0x9e, 0xc2, 0x2f, 0x32, 0x51, 0x42, 0x5c, 0x01, 0x37, 0xb1, 0x62, 0x81, 0x7e, 0xd8, 0x82, + 0xd3, 0xc9, 0xa6, 0x63, 0xc2, 0xd0, 0x22, 
0xea, 0x2d, 0x57, 0x6b, 0x2d, 0x8b, 0xef, 0x4f, 0xc9, + 0xff, 0x06, 0xf1, 0xbd, 0x6e, 0x04, 0xb8, 0x73, 0x65, 0x68, 0x31, 0x43, 0xaf, 0x36, 0x60, 0x5a, + 0x14, 0x7b, 0xd0, 0xad, 0xbd, 0x00, 0x23, 0x4d, 0xbf, 0xed, 0x45, 0xc2, 0xf3, 0x4f, 0x78, 0x21, + 0x31, 0xef, 0x9b, 0x35, 0x0d, 0x8e, 0x0d, 0xaa, 0x84, 0x46, 0x6e, 0xe8, 0xbe, 0x35, 0x72, 0x6f, + 0xc1, 0x88, 0xa7, 0xb9, 0xaa, 0x77, 0xba, 0xc1, 0x0a, 0xed, 0xa2, 0x46, 0xcd, 0x5b, 0xa9, 0x43, + 0xb0, 0xc1, 0xad, 0xb3, 0xb6, 0x0c, 0xde, 0x99, 0xb6, 0xec, 0x68, 0xfd, 0x0d, 0x7f, 0xbe, 0x90, + 0x71, 0x63, 0xe0, 0x5a, 0xb9, 0x57, 0x4d, 0xad, 0xdc, 0xb9, 0xa4, 0x56, 0x2e, 0x65, 0xaa, 0x32, + 0x14, 0x72, 0xbd, 0x67, 0x87, 0xeb, 0x39, 0xa4, 0xee, 0xf7, 0x5a, 0xf0, 0x10, 0xb3, 0x7d, 0xd0, + 0x0a, 0xde, 0xb1, 0xbd, 0xe3, 0xe1, 0xbb, 0xfb, 0xe5, 0x87, 0x56, 0xb3, 0xd9, 0xe1, 0xbc, 0x7a, + 0xec, 0x06, 0x9c, 0xed, 0x76, 0xee, 0x32, 0x1f, 0xd7, 0xba, 0x72, 0x8e, 0x88, 0x7d, 0x5c, 0xeb, + 0x2b, 0x8b, 0x98, 0x61, 0x7a, 0x0d, 0x18, 0x67, 0xff, 0x07, 0x0b, 0x8a, 0x15, 0xbf, 0x7e, 0x04, + 0x37, 0xfa, 0x0f, 0x1b, 0x37, 0xfa, 0x87, 0xb3, 0x4f, 0xfc, 0x7a, 0xae, 0xb1, 0x6f, 0x29, 0x61, + 0xec, 0x3b, 0x9d, 0xc7, 0xa0, 0xb3, 0x69, 0xef, 0xa7, 0x8a, 0x30, 0x5c, 0xf1, 0xeb, 0x6a, 0x9d, + 0xfd, 0x93, 0xfb, 0x79, 0x60, 0x92, 0x9b, 0xef, 0x47, 0xe3, 0xcc, 0x5c, 0x63, 0x65, 0xc8, 0x81, + 0x6f, 0xb3, 0x77, 0x26, 0x37, 0x88, 0xbb, 0xb5, 0x1d, 0x91, 0x7a, 0xf2, 0x73, 0x8e, 0xee, 0x9d, + 0xc9, 0x37, 0x8b, 0x30, 0x9e, 0xa8, 0x1d, 0x35, 0x60, 0xb4, 0xa1, 0x9b, 0x92, 0xc4, 0x3c, 0xbd, + 0x2f, 0x2b, 0x94, 0xf0, 0xd3, 0xd7, 0x40, 0xd8, 0x64, 0x8e, 0x66, 0x01, 0x94, 0x6f, 0x85, 0xd4, + 0xf6, 0xb3, 0x6b, 0x8d, 0x72, 0xbe, 0x08, 0xb1, 0x46, 0x81, 0x5e, 0x84, 0xe1, 0xc8, 0x6f, 0xf9, + 0x0d, 0x7f, 0x6b, 0xef, 0x0a, 0x91, 0xb1, 0x04, 0x95, 0xf7, 0xed, 0x46, 0x8c, 0xc2, 0x3a, 0x1d, + 0xba, 0x0d, 0x93, 0x8a, 0x49, 0xf5, 0x10, 0xcc, 0x6b, 0x4c, 0x6d, 0xb2, 0x9e, 0xe4, 0x88, 0xd3, + 0x95, 0xa0, 0x97, 0x61, 0x8c, 0xb9, 0x01, 0xb3, 0xf2, 0x57, 0xc8, 0x9e, 0x8c, 0x31, 0xcb, 0x24, + 0xec, 0x35, 0x03, 0x83, 0x13, 0x94, 0x68, 0x01, 0x26, 0x9b, 0x6e, 0x98, 0x28, 0x3e, 0xc0, 0x8a, + 0xb3, 0x06, 0xac, 0x25, 0x91, 0x38, 0x4d, 0x6f, 0xff, 0xac, 0x18, 0x63, 0x2f, 0x72, 0xdf, 0x5b, + 0x8e, 0xef, 0xee, 0xe5, 0xf8, 0x0d, 0x0b, 0x26, 0x68, 0xed, 0xcc, 0xb7, 0x51, 0x0a, 0x52, 0x2a, + 0x0b, 0x81, 0xd5, 0x21, 0x0b, 0xc1, 0x39, 0xba, 0x6d, 0xd7, 0xfd, 0x76, 0x24, 0xb4, 0xa3, 0xda, + 0xbe, 0x4c, 0xa1, 0x58, 0x60, 0x05, 0x1d, 0x09, 0x02, 0xf1, 0x1e, 0x5b, 0xa7, 0x23, 0x41, 0x80, + 0x05, 0x56, 0x26, 0x29, 0xe8, 0xcb, 0x4e, 0x52, 0xc0, 0x63, 0x4d, 0x0b, 0x2f, 0x38, 0x21, 0xd2, + 0x6a, 0xb1, 0xa6, 0xa5, 0x7b, 0x5c, 0x4c, 0x63, 0x7f, 0xad, 0x08, 0x23, 0x15, 0xbf, 0x1e, 0x3b, + 0x76, 0xbc, 0x60, 0x38, 0x76, 0x9c, 0x4d, 0x38, 0x76, 0x4c, 0xe8, 0xb4, 0xef, 0xb9, 0x71, 0x7c, + 0xab, 0xdc, 0x38, 0x7e, 0xc3, 0x62, 0xa3, 0xb6, 0xb8, 0x5e, 0xe5, 0xae, 0xb2, 0xe8, 0x22, 0x0c, + 0xb3, 0x1d, 0x8e, 0x05, 0x00, 0x90, 0xde, 0x0e, 0x2c, 0x69, 0xe0, 0x7a, 0x0c, 0xc6, 0x3a, 0x0d, + 0x3a, 0x0f, 0x43, 0x21, 0x71, 0x82, 0xda, 0xb6, 0xda, 0xde, 0x85, 0x6b, 0x02, 0x87, 0x61, 0x85, + 0x45, 0x6f, 0xc4, 0x61, 0x8e, 0x8b, 0xf9, 0x0f, 0x8a, 0xf5, 0xf6, 0xf0, 0x25, 0x92, 0x1f, 0xdb, + 0xd8, 0xbe, 0x01, 0x28, 0x4d, 0xdf, 0x43, 0x20, 0xce, 0xb2, 0x19, 0x88, 0xb3, 0x94, 0x0a, 0xc2, + 0xf9, 0xd7, 0x16, 0x8c, 0x55, 0xfc, 0x3a, 0x5d, 0xba, 0xdf, 0x49, 0xeb, 0x54, 0x8f, 0xf1, 0x3e, + 0xd0, 0x21, 0xc6, 0xfb, 0xa3, 0xd0, 0x5f, 0xf1, 0xeb, 0x5d, 0x82, 0x85, 0xfe, 0x2d, 0x0b, 0x06, + 0x2b, 0x7e, 0xfd, 0x08, 0x0c, 0x2f, 0xaf, 0x9a, 0x86, 0x97, 0x87, 
0x72, 0xe6, 0x4d, 0x8e, 0xad, + 0xe5, 0xff, 0xef, 0x83, 0x51, 0xda, 0x4e, 0x7f, 0x4b, 0x0e, 0xa5, 0xd1, 0x6d, 0x56, 0x0f, 0xdd, + 0x46, 0xaf, 0x01, 0x7e, 0xa3, 0xe1, 0xdf, 0x4a, 0x0e, 0xeb, 0x32, 0x83, 0x62, 0x81, 0x45, 0xcf, + 0xc0, 0x50, 0x2b, 0x20, 0xbb, 0xae, 0x2f, 0xe4, 0x6b, 0xcd, 0x8c, 0x55, 0x11, 0x70, 0xac, 0x28, + 0xe8, 0xc5, 0x3b, 0x74, 0x3d, 0x2a, 0x4b, 0xd4, 0x7c, 0xaf, 0xce, 0x6d, 0x13, 0x45, 0x91, 0x88, + 0x48, 0x83, 0x63, 0x83, 0x0a, 0xdd, 0x80, 0x12, 0xfb, 0xcf, 0xb6, 0x9d, 0x83, 0xa7, 0x40, 0x17, + 0xa9, 0x59, 0x05, 0x03, 0x1c, 0xf3, 0x42, 0xcf, 0x01, 0x44, 0x32, 0x99, 0x47, 0x28, 0x82, 0x46, + 0xaa, 0xbb, 0x88, 0x4a, 0xf3, 0x11, 0x62, 0x8d, 0x0a, 0x3d, 0x0d, 0xa5, 0xc8, 0x71, 0x1b, 0xab, + 0xae, 0xc7, 0xec, 0xf7, 0xb4, 0xfd, 0x22, 0x43, 0xaa, 0x00, 0xe2, 0x18, 0x4f, 0x65, 0x41, 0x16, + 0x0e, 0x68, 0x7e, 0x2f, 0x12, 0xc9, 0xc0, 0x8a, 0x5c, 0x16, 0x5c, 0x55, 0x50, 0xac, 0x51, 0xa0, + 0x6d, 0x38, 0xe5, 0x7a, 0x2c, 0x69, 0x0f, 0xa9, 0xee, 0xb8, 0xad, 0x8d, 0xd5, 0xea, 0x75, 0x12, + 0xb8, 0x9b, 0x7b, 0xf3, 0x4e, 0x6d, 0x87, 0x78, 0x32, 0xb9, 0xf5, 0x63, 0xa2, 0x89, 0xa7, 0x56, + 0x3a, 0xd0, 0xe2, 0x8e, 0x9c, 0xec, 0xe7, 0xd9, 0x7c, 0xbf, 0x5a, 0x45, 0x4f, 0x19, 0x5b, 0xc7, + 0x09, 0x7d, 0xeb, 0xb8, 0xb7, 0x5f, 0x1e, 0xb8, 0x5a, 0xd5, 0x62, 0xd2, 0xbc, 0x04, 0xc7, 0x2b, + 0x7e, 0xbd, 0xe2, 0x07, 0xd1, 0xb2, 0x1f, 0xdc, 0x72, 0x82, 0xba, 0x9c, 0x5e, 0x65, 0x19, 0x95, + 0x87, 0xee, 0x9f, 0xfd, 0x7c, 0x77, 0x31, 0x22, 0xee, 0x3c, 0xcf, 0x24, 0xb6, 0x03, 0x3e, 0xb7, + 0xac, 0x31, 0xd9, 0x41, 0xa5, 0xbd, 0xba, 0xe4, 0x44, 0x04, 0x5d, 0x85, 0xd1, 0x9a, 0x7e, 0x8c, + 0x8a, 0xe2, 0x4f, 0xca, 0x83, 0xcc, 0x38, 0x63, 0x33, 0xcf, 0x5d, 0xb3, 0xbc, 0xfd, 0x59, 0x51, + 0x09, 0x57, 0x44, 0x70, 0x97, 0xd6, 0x5e, 0xf2, 0xbf, 0xcb, 0xbc, 0x38, 0x85, 0xfc, 0x98, 0x87, + 0xdc, 0xae, 0xdc, 0x31, 0x2f, 0x8e, 0xfd, 0xdd, 0x70, 0x22, 0x59, 0x7d, 0xcf, 0x49, 0xe8, 0x17, + 0x60, 0x32, 0xd0, 0x0b, 0x6a, 0x49, 0x06, 0x8f, 0xf3, 0x5c, 0x26, 0x09, 0x24, 0x4e, 0xd3, 0xdb, + 0x2f, 0xc2, 0x24, 0xbd, 0xfc, 0x2a, 0x41, 0x8e, 0xf5, 0x72, 0xf7, 0xf0, 0x44, 0xff, 0xb1, 0x9f, + 0x1d, 0x44, 0x89, 0x8c, 0x53, 0xe8, 0x93, 0x30, 0x16, 0x92, 0x55, 0xd7, 0x6b, 0xdf, 0x96, 0xba, + 0xb5, 0x0e, 0xef, 0x8c, 0xab, 0x4b, 0x3a, 0x25, 0xbf, 0x3f, 0x98, 0x30, 0x9c, 0xe0, 0x86, 0x9a, + 0x30, 0x76, 0xcb, 0xf5, 0xea, 0xfe, 0xad, 0x50, 0xf2, 0x1f, 0xca, 0x57, 0xd4, 0xdf, 0xe0, 0x94, + 0x89, 0x36, 0x1a, 0xd5, 0xdd, 0x30, 0x98, 0xe1, 0x04, 0x73, 0xba, 0xd8, 0x83, 0xb6, 0x37, 0x17, + 0x5e, 0x0b, 0x09, 0x7f, 0x39, 0x2a, 0x16, 0x3b, 0x96, 0x40, 0x1c, 0xe3, 0xe9, 0x62, 0x67, 0x7f, + 0x2e, 0x05, 0x7e, 0x9b, 0xa7, 0x37, 0x12, 0x8b, 0x1d, 0x2b, 0x28, 0xd6, 0x28, 0xe8, 0x66, 0xc8, + 0xfe, 0xad, 0xfb, 0x1e, 0xf6, 0xfd, 0x48, 0x6e, 0x9f, 0x2c, 0x3d, 0x9f, 0x06, 0xc7, 0x06, 0x15, + 0x5a, 0x06, 0x14, 0xb6, 0x5b, 0xad, 0x06, 0x73, 0x5d, 0x74, 0x1a, 0x8c, 0x15, 0x77, 0xbb, 0x2a, + 0x72, 0xef, 0x96, 0x6a, 0x0a, 0x8b, 0x33, 0x4a, 0xd0, 0x73, 0x71, 0x53, 0x34, 0xb5, 0x9f, 0x35, + 0x95, 0x1b, 0xf5, 0xaa, 0xbc, 0x9d, 0x12, 0x87, 0x96, 0x60, 0x30, 0xdc, 0x0b, 0x6b, 0x51, 0x23, + 0xec, 0x94, 0x0c, 0xb1, 0xca, 0x48, 0xb4, 0x5c, 0xbc, 0xbc, 0x08, 0x96, 0x65, 0x51, 0x0d, 0xa6, + 0x04, 0xc7, 0x85, 0x6d, 0xc7, 0x53, 0x29, 0xda, 0xb8, 0xf7, 0xde, 0xc5, 0xbb, 0xfb, 0xe5, 0x29, + 0x51, 0xb3, 0x8e, 0xbe, 0xb7, 0x5f, 0xa6, 0x8b, 0x23, 0x03, 0x83, 0xb3, 0xb8, 0xf1, 0xc9, 0x57, + 0xab, 0xf9, 0xcd, 0x56, 0x25, 0xf0, 0x37, 0xdd, 0x06, 0xe9, 0x64, 0x18, 0xad, 0x1a, 0x94, 0x62, + 0xf2, 0x19, 0x30, 0x9c, 0xe0, 0x66, 0x7f, 0x96, 0xc9, 0x8e, 0x55, 0x77, 0xcb, 0x73, 0xa2, 
0x76, + 0x40, 0x50, 0x13, 0x46, 0x5b, 0x6c, 0x77, 0x11, 0x49, 0x87, 0xc4, 0x5c, 0x7f, 0xa1, 0x47, 0xfd, + 0xd7, 0x2d, 0x96, 0x36, 0xd1, 0xf0, 0x83, 0xac, 0xe8, 0xec, 0xb0, 0xc9, 0xdd, 0xfe, 0x17, 0x27, + 0x99, 0xf4, 0x51, 0xe5, 0x4a, 0xad, 0x41, 0xf1, 0x6c, 0x4c, 0x5c, 0x63, 0x67, 0xf2, 0xd5, 0xc7, + 0xf1, 0xb0, 0x88, 0xa7, 0x67, 0x58, 0x96, 0x45, 0x9f, 0x80, 0x31, 0x7a, 0x2b, 0x54, 0x12, 0x40, + 0x38, 0x7d, 0x2c, 0x3f, 0xbc, 0x8f, 0xa2, 0xd2, 0x13, 0x92, 0xe9, 0x85, 0x71, 0x82, 0x19, 0x7a, + 0x83, 0xb9, 0x06, 0x4a, 0xd6, 0x85, 0x5e, 0x58, 0xeb, 0x5e, 0x80, 0x92, 0xad, 0xc6, 0x04, 0xb5, + 0x61, 0x2a, 0x9d, 0x76, 0x35, 0x9c, 0xb6, 0xf3, 0xc5, 0xeb, 0x74, 0xe6, 0xd4, 0x38, 0x73, 0x54, + 0x1a, 0x17, 0xe2, 0x2c, 0xfe, 0x68, 0x35, 0x99, 0x14, 0xb3, 0x68, 0x28, 0x9e, 0x53, 0x89, 0x31, + 0x47, 0x3b, 0xe6, 0xc3, 0xdc, 0x82, 0xd3, 0x5a, 0x5e, 0xc1, 0x4b, 0x81, 0xc3, 0x5c, 0x53, 0x5c, + 0xb6, 0x9d, 0x6a, 0x72, 0xd1, 0x23, 0x77, 0xf7, 0xcb, 0xa7, 0x37, 0x3a, 0x11, 0xe2, 0xce, 0x7c, + 0xd0, 0x55, 0x38, 0xce, 0x83, 0x53, 0x2c, 0x12, 0xa7, 0xde, 0x70, 0x3d, 0x25, 0x78, 0xf1, 0x25, + 0x7f, 0xf2, 0xee, 0x7e, 0xf9, 0xf8, 0x5c, 0x16, 0x01, 0xce, 0x2e, 0x87, 0x5e, 0x85, 0x52, 0xdd, + 0x0b, 0x45, 0x1f, 0x0c, 0x18, 0xa9, 0x1b, 0x4b, 0x8b, 0xeb, 0x55, 0xf5, 0xfd, 0xf1, 0x1f, 0x1c, + 0x17, 0x40, 0x5b, 0xdc, 0xf2, 0xa1, 0xd4, 0x55, 0x83, 0xa9, 0x98, 0x85, 0x49, 0x8d, 0xae, 0xf1, + 0x3c, 0x9d, 0x9b, 0xfc, 0xd4, 0xab, 0x2d, 0xe3, 0xe5, 0xba, 0xc1, 0x18, 0xbd, 0x0e, 0x48, 0xa4, + 0x08, 0x99, 0xab, 0xb1, 0x8c, 0x56, 0x9a, 0x3b, 0xa2, 0xba, 0x85, 0x56, 0x53, 0x14, 0x38, 0xa3, + 0x14, 0xba, 0x4c, 0x77, 0x15, 0x1d, 0x2a, 0x76, 0x2d, 0x95, 0x20, 0x78, 0x91, 0xb4, 0x02, 0xc2, + 0x3c, 0xe8, 0x4c, 0x8e, 0x38, 0x51, 0x0e, 0xd5, 0xe1, 0x94, 0xd3, 0x8e, 0x7c, 0x66, 0x54, 0x32, + 0x49, 0x37, 0xfc, 0x1d, 0xe2, 0x31, 0x7b, 0xee, 0x10, 0x8b, 0x85, 0x78, 0x6a, 0xae, 0x03, 0x1d, + 0xee, 0xc8, 0x85, 0x4a, 0xe4, 0x32, 0xe7, 0xbf, 0xb0, 0xf7, 0x18, 0x2f, 0x6d, 0xb9, 0x11, 0x54, + 0x52, 0xa0, 0x17, 0x61, 0x78, 0xdb, 0x0f, 0xa3, 0x75, 0x12, 0xdd, 0xf2, 0x83, 0x1d, 0x11, 0x93, + 0x3c, 0xce, 0x03, 0x11, 0xa3, 0xb0, 0x4e, 0x47, 0xaf, 0xdc, 0xcc, 0xdb, 0x68, 0x65, 0x91, 0x39, + 0x7a, 0x0c, 0xc5, 0x7b, 0xcc, 0x65, 0x0e, 0xc6, 0x12, 0x2f, 0x49, 0x57, 0x2a, 0x0b, 0xcc, 0x69, + 0x23, 0x41, 0xba, 0x52, 0x59, 0xc0, 0x12, 0x4f, 0xa7, 0x6b, 0xb8, 0xed, 0x04, 0xa4, 0x12, 0xf8, + 0x35, 0x12, 0x6a, 0xd9, 0x47, 0x1e, 0xe6, 0x11, 0xd7, 0xe9, 0x74, 0xad, 0x66, 0x11, 0xe0, 0xec, + 0x72, 0x88, 0xa4, 0x73, 0x6a, 0x8e, 0xe5, 0x5b, 0xdb, 0xd2, 0xf2, 0x4c, 0x8f, 0x69, 0x35, 0x3d, + 0x98, 0x50, 0xd9, 0x3c, 0x79, 0x8c, 0xf5, 0x70, 0x7a, 0x9c, 0xcd, 0xed, 0xde, 0x03, 0xb4, 0x2b, + 0xfb, 0xe5, 0x4a, 0x82, 0x13, 0x4e, 0xf1, 0x36, 0x82, 0x6d, 0x4e, 0x74, 0x0d, 0xb6, 0x79, 0x01, + 0x4a, 0x61, 0xfb, 0x66, 0xdd, 0x6f, 0x3a, 0xae, 0xc7, 0x9c, 0x36, 0xb4, 0xbb, 0x5f, 0x55, 0x22, + 0x70, 0x4c, 0x83, 0x96, 0x61, 0xc8, 0x91, 0xc6, 0x49, 0x94, 0x1f, 0x47, 0x4c, 0x99, 0x24, 0x79, + 0x68, 0x1d, 0x69, 0x8e, 0x54, 0x65, 0xd1, 0x2b, 0x30, 0x2a, 0x82, 0x2b, 0x88, 0x04, 0xd8, 0x53, + 0xe6, 0x0b, 0xd8, 0xaa, 0x8e, 0xc4, 0x26, 0x2d, 0xba, 0x06, 0xc3, 0x91, 0xdf, 0x60, 0xcf, 0x38, + 0xa9, 0x98, 0x77, 0x22, 0x3f, 0xdc, 0xe7, 0x86, 0x22, 0xd3, 0xd5, 0xe6, 0xaa, 0x28, 0xd6, 0xf9, + 0xa0, 0x0d, 0x3e, 0xdf, 0x59, 0xae, 0x11, 0x12, 0x8a, 0x0c, 0xca, 0xa7, 0xf3, 0x3c, 0xee, 0x18, + 0x99, 0xb9, 0x1c, 0x44, 0x49, 0xac, 0xb3, 0x41, 0x97, 0x60, 0xb2, 0x15, 0xb8, 0x3e, 0x9b, 0x13, + 0xca, 0xd8, 0x3a, 0x6d, 0x66, 0x16, 0xac, 0x24, 0x09, 0x70, 0xba, 0x0c, 0x8b, 0x8d, 0x21, 0x80, + 0xd3, 0x27, 0x79, 
0x76, 0x24, 0x7e, 0x95, 0xe6, 0x30, 0xac, 0xb0, 0x68, 0x8d, 0xed, 0xc4, 0x5c, + 0x0b, 0x34, 0x3d, 0x93, 0x1f, 0xba, 0x4c, 0xd7, 0x16, 0x71, 0xe1, 0x55, 0xfd, 0xc5, 0x31, 0x07, + 0x54, 0xd7, 0x92, 0x12, 0xd3, 0x2b, 0x40, 0x38, 0x7d, 0xaa, 0x83, 0xcb, 0x67, 0xe2, 0x56, 0x16, + 0x0b, 0x04, 0x06, 0x38, 0xc4, 0x09, 0x9e, 0xe8, 0x23, 0x30, 0x21, 0xe2, 0xd0, 0xc6, 0xdd, 0x74, + 0x3a, 0x7e, 0x16, 0x83, 0x13, 0x38, 0x9c, 0xa2, 0xe6, 0xd9, 0x89, 0x9c, 0x9b, 0x0d, 0x22, 0xb6, + 0xbe, 0x55, 0xd7, 0xdb, 0x09, 0xa7, 0xcf, 0xb0, 0xfd, 0x41, 0x64, 0x27, 0x4a, 0x62, 0x71, 0x46, + 0x09, 0xb4, 0x01, 0x13, 0xad, 0x80, 0x90, 0x26, 0x13, 0xf4, 0xc5, 0x79, 0x56, 0xe6, 0xa1, 0x61, + 0x68, 0x4b, 0x2a, 0x09, 0xdc, 0xbd, 0x0c, 0x18, 0x4e, 0x71, 0x40, 0xb7, 0x60, 0xc8, 0xdf, 0x25, + 0xc1, 0x36, 0x71, 0xea, 0xd3, 0x67, 0x3b, 0x3c, 0xd6, 0x12, 0x87, 0xdb, 0x55, 0x41, 0x9b, 0xf0, + 0x65, 0x91, 0xe0, 0xee, 0xbe, 0x2c, 0xb2, 0x32, 0xf4, 0x7f, 0x5a, 0x70, 0x52, 0x5a, 0x87, 0xaa, + 0x2d, 0xda, 0xeb, 0x0b, 0xbe, 0x17, 0x46, 0x01, 0x0f, 0x66, 0xf2, 0x48, 0x7e, 0x80, 0x8f, 0x8d, + 0x9c, 0x42, 0x4a, 0x11, 0x7d, 0x32, 0x8f, 0x22, 0xc4, 0xf9, 0x35, 0xd2, 0xab, 0x69, 0x48, 0x22, + 0xb9, 0x19, 0xcd, 0x85, 0xcb, 0x6f, 0x2c, 0xae, 0x4f, 0x3f, 0xca, 0x23, 0xb1, 0xd0, 0xc5, 0x50, + 0x4d, 0x22, 0x71, 0x9a, 0x1e, 0x5d, 0x84, 0x82, 0x1f, 0x4e, 0x3f, 0xd6, 0x21, 0x8f, 0xb5, 0x5f, + 0xbf, 0x5a, 0xe5, 0x3e, 0x8d, 0x57, 0xab, 0xb8, 0xe0, 0x87, 0x32, 0x43, 0x10, 0xbd, 0x8f, 0x85, + 0xd3, 0x8f, 0x73, 0xb5, 0xa5, 0xcc, 0x10, 0xc4, 0x80, 0x38, 0xc6, 0xa3, 0x6d, 0x18, 0x0f, 0x8d, + 0x7b, 0x6f, 0x38, 0x7d, 0x8e, 0xf5, 0xd4, 0xe3, 0x79, 0x83, 0x66, 0x50, 0x6b, 0xa9, 0x3b, 0x4c, + 0x2e, 0x38, 0xc9, 0x96, 0xaf, 0x2e, 0xed, 0xe6, 0x1d, 0x4e, 0x3f, 0xd1, 0x65, 0x75, 0x69, 0xc4, + 0xfa, 0xea, 0xd2, 0x79, 0xe0, 0x04, 0xcf, 0x99, 0xef, 0x82, 0xc9, 0x94, 0xb8, 0x74, 0x10, 0xff, + 0xfd, 0x99, 0x1d, 0x18, 0x35, 0xa6, 0xe4, 0x03, 0x75, 0xef, 0xf8, 0x9d, 0x12, 0x94, 0x94, 0xd9, + 0x1d, 0x5d, 0x30, 0x3d, 0x3a, 0x4e, 0x26, 0x3d, 0x3a, 0x86, 0x2a, 0x7e, 0xdd, 0x70, 0xe2, 0xd8, + 0xc8, 0x88, 0xd7, 0x99, 0xb7, 0x01, 0xf6, 0xfe, 0xc8, 0x48, 0x33, 0x25, 0x14, 0x7b, 0x76, 0x0d, + 0xe9, 0xeb, 0x68, 0x9d, 0xb8, 0x04, 0x93, 0x9e, 0xcf, 0x64, 0x74, 0x52, 0x97, 0x02, 0x18, 0x93, + 0xb3, 0x4a, 0x7a, 0x00, 0xac, 0x04, 0x01, 0x4e, 0x97, 0xa1, 0x15, 0x72, 0x41, 0x29, 0x69, 0x0e, + 0xe1, 0x72, 0x14, 0x16, 0x58, 0x7a, 0x37, 0xe4, 0xbf, 0xc2, 0xe9, 0x89, 0xfc, 0xbb, 0x21, 0x2f, + 0x94, 0x14, 0xc6, 0x42, 0x29, 0x8c, 0x31, 0xed, 0x7f, 0xcb, 0xaf, 0xaf, 0x54, 0x84, 0x98, 0xaf, + 0x45, 0xd2, 0xae, 0xaf, 0x54, 0x30, 0xc7, 0xa1, 0x39, 0x18, 0x60, 0x3f, 0xc2, 0xe9, 0x91, 0xfc, + 0x80, 0x49, 0xac, 0x84, 0x96, 0xa1, 0x90, 0x15, 0xc0, 0xa2, 0x20, 0xd3, 0xee, 0xd2, 0xbb, 0x11, + 0xd3, 0xee, 0x0e, 0xde, 0xa7, 0x76, 0x57, 0x32, 0xc0, 0x31, 0x2f, 0x74, 0x1b, 0x8e, 0x1b, 0xf7, + 0x51, 0xf5, 0xea, 0x0a, 0xf2, 0x0d, 0xbf, 0x09, 0xe2, 0xf9, 0xd3, 0xa2, 0xd1, 0xc7, 0x57, 0xb2, + 0x38, 0xe1, 0xec, 0x0a, 0x50, 0x03, 0x26, 0x6b, 0xa9, 0x5a, 0x87, 0x7a, 0xaf, 0x55, 0xcd, 0x8b, + 0x74, 0x8d, 0x69, 0xc6, 0xe8, 0x15, 0x18, 0x7a, 0xdb, 0xe7, 0x4e, 0x5a, 0xe2, 0x6a, 0x22, 0x23, + 0x7e, 0x0c, 0xbd, 0x71, 0xb5, 0xca, 0xe0, 0xf7, 0xf6, 0xcb, 0xc3, 0x15, 0xbf, 0x2e, 0xff, 0x62, + 0x55, 0x00, 0xfd, 0x80, 0x05, 0x33, 0xe9, 0x0b, 0xaf, 0x6a, 0xf4, 0x68, 0xef, 0x8d, 0xb6, 0x45, + 0xa5, 0x33, 0x4b, 0xb9, 0xec, 0x70, 0x87, 0xaa, 0xd0, 0x87, 0xe8, 0x7a, 0x0a, 0xdd, 0x3b, 0x44, + 0xa4, 0x77, 0x7e, 0x24, 0x5e, 0x4f, 0x14, 0x7a, 0x6f, 0xbf, 0x3c, 0xce, 0x77, 0x46, 0xf7, 0x8e, + 0x8a, 0xf9, 0xcd, 0x0b, 0xa0, 0xef, 0x86, 
0xe3, 0x41, 0x5a, 0x83, 0x4a, 0xa4, 0x10, 0xfe, 0x54, + 0x2f, 0xbb, 0x6c, 0x72, 0xc0, 0x71, 0x16, 0x43, 0x9c, 0x5d, 0x8f, 0xfd, 0xab, 0x16, 0xd3, 0x6f, + 0x8b, 0x66, 0x91, 0xb0, 0xdd, 0x38, 0x8a, 0xa4, 0xf2, 0x4b, 0x86, 0xed, 0xf8, 0xbe, 0x3d, 0x9b, + 0xfe, 0xb1, 0xc5, 0x3c, 0x9b, 0x8e, 0xf0, 0x8d, 0xd6, 0x1b, 0x30, 0x14, 0xc9, 0x64, 0xff, 0x1d, + 0xf2, 0xe0, 0x6b, 0x8d, 0x62, 0xde, 0x5d, 0xea, 0x92, 0xa3, 0xf2, 0xfa, 0x2b, 0x36, 0xf6, 0x3f, + 0xe0, 0x23, 0x20, 0x31, 0x47, 0x60, 0xa2, 0x5b, 0x34, 0x4d, 0x74, 0xe5, 0x2e, 0x5f, 0x90, 0x63, + 0xaa, 0xfb, 0xfb, 0x66, 0xbb, 0x99, 0x72, 0xef, 0xdd, 0xee, 0x52, 0x67, 0x7f, 0xc1, 0x02, 0x88, + 0x93, 0x2c, 0xf4, 0x90, 0xce, 0xf5, 0x25, 0x7a, 0xad, 0xf1, 0x23, 0xbf, 0xe6, 0x37, 0x84, 0x81, + 0xe2, 0x54, 0x6c, 0x25, 0xe4, 0xf0, 0x7b, 0xda, 0x6f, 0xac, 0xa8, 0x51, 0x59, 0x46, 0x3d, 0x2d, + 0xc6, 0x76, 0x6b, 0x23, 0xe2, 0xe9, 0x97, 0x2d, 0x38, 0x96, 0xe5, 0xf0, 0x4f, 0x2f, 0xc9, 0x5c, + 0xcd, 0xa9, 0xdc, 0x1d, 0xd5, 0x68, 0x5e, 0x17, 0x70, 0xac, 0x28, 0x7a, 0xce, 0x93, 0x7b, 0xb0, + 0x04, 0x00, 0x57, 0x61, 0xb4, 0x12, 0x10, 0x4d, 0xbe, 0x78, 0x8d, 0x47, 0xd2, 0xe1, 0xed, 0x79, + 0xe6, 0xc0, 0x51, 0x74, 0xec, 0xaf, 0x14, 0xe0, 0x18, 0x77, 0xda, 0x99, 0xdb, 0xf5, 0xdd, 0x7a, + 0xc5, 0xaf, 0x8b, 0x67, 0x9a, 0x6f, 0xc2, 0x48, 0x4b, 0xd3, 0x4d, 0x77, 0x0a, 0x66, 0xad, 0xeb, + 0xb0, 0x63, 0x6d, 0x9a, 0x0e, 0xc5, 0x06, 0x2f, 0x54, 0x87, 0x11, 0xb2, 0xeb, 0xd6, 0x94, 0xe7, + 0x47, 0xe1, 0xc0, 0x87, 0xb4, 0xaa, 0x65, 0x49, 0xe3, 0x83, 0x0d, 0xae, 0x3d, 0xbb, 0xda, 0x6a, + 0x22, 0x5a, 0x5f, 0x17, 0x6f, 0x8f, 0x1f, 0xb5, 0xe0, 0xa1, 0x9c, 0xd0, 0xd7, 0xb4, 0xba, 0x5b, + 0xcc, 0x3d, 0x4a, 0x4c, 0x5b, 0x55, 0x1d, 0x77, 0x9a, 0xc2, 0x02, 0x8b, 0x3e, 0x0a, 0xc0, 0x9d, + 0x9e, 0x88, 0x57, 0xeb, 0x1a, 0x23, 0xd8, 0x08, 0x6f, 0xaa, 0x45, 0xaa, 0x94, 0xe5, 0xb1, 0xc6, + 0xcb, 0xfe, 0x72, 0x1f, 0xf4, 0x33, 0x27, 0x1b, 0x54, 0x81, 0xc1, 0x6d, 0x9e, 0x25, 0xae, 0xe3, + 0xb8, 0x51, 0x5a, 0x99, 0x78, 0x2e, 0x1e, 0x37, 0x0d, 0x8a, 0x25, 0x1b, 0xb4, 0x06, 0x53, 0x3c, + 0x59, 0x5f, 0x63, 0x91, 0x34, 0x9c, 0x3d, 0xa9, 0xf6, 0xe5, 0xf9, 0xe7, 0x95, 0xfa, 0x7b, 0x25, + 0x4d, 0x82, 0xb3, 0xca, 0xa1, 0xd7, 0x60, 0x8c, 0x5e, 0xc3, 0xfd, 0x76, 0x24, 0x39, 0xf1, 0x34, + 0x7d, 0xea, 0x66, 0xb2, 0x61, 0x60, 0x71, 0x82, 0x1a, 0xbd, 0x02, 0xa3, 0xad, 0x94, 0x82, 0xbb, + 0x3f, 0xd6, 0x04, 0x99, 0x4a, 0x6d, 0x93, 0x96, 0xf9, 0xfc, 0xb7, 0xd9, 0x0b, 0x87, 0x8d, 0xed, + 0x80, 0x84, 0xdb, 0x7e, 0xa3, 0xce, 0x24, 0xe0, 0x7e, 0xcd, 0xe7, 0x3f, 0x81, 0xc7, 0xa9, 0x12, + 0x94, 0xcb, 0xa6, 0xe3, 0x36, 0xda, 0x01, 0x89, 0xb9, 0x0c, 0x98, 0x5c, 0x96, 0x13, 0x78, 0x9c, + 0x2a, 0xd1, 0x5d, 0x73, 0x3f, 0x78, 0x38, 0x9a, 0x7b, 0xfb, 0xa7, 0x0b, 0x60, 0x0c, 0xed, 0x77, + 0x70, 0xfa, 0xc0, 0x57, 0xa1, 0x6f, 0x2b, 0x68, 0xd5, 0x84, 0x43, 0x59, 0xe6, 0x97, 0xc5, 0xb9, + 0xc3, 0xf9, 0x97, 0xd1, 0xff, 0x98, 0x95, 0xa2, 0x6b, 0xfc, 0x78, 0x25, 0xf0, 0xe9, 0x21, 0x27, + 0x63, 0x2d, 0xaa, 0xa7, 0x35, 0x83, 0x32, 0x48, 0x44, 0x87, 0xa8, 0xc4, 0xe2, 0x7d, 0x00, 0xe7, + 0x60, 0xf8, 0x5e, 0x55, 0x45, 0x28, 0x18, 0xc9, 0x05, 0x5d, 0x84, 0x61, 0x91, 0xd1, 0x8d, 0xbd, + 0x00, 0xe1, 0x8b, 0x89, 0xf9, 0x8a, 0x2d, 0xc6, 0x60, 0xac, 0xd3, 0xd8, 0x3f, 0x58, 0x80, 0xa9, + 0x8c, 0x27, 0x7c, 0xfc, 0x18, 0xd9, 0x72, 0xc3, 0x48, 0xa5, 0x27, 0xd7, 0x8e, 0x11, 0x0e, 0xc7, + 0x8a, 0x82, 0xee, 0x55, 0xfc, 0xa0, 0x4a, 0x1e, 0x4e, 0xe2, 0x89, 0x8c, 0xc0, 0x1e, 0x30, 0xd1, + 0xf7, 0x59, 0xe8, 0x6b, 0x87, 0x44, 0xc6, 0x13, 0x57, 0xc7, 0x36, 0x33, 0x6b, 0x33, 0x0c, 0xbd, + 0x02, 0x6e, 0x29, 0x0b, 0xb1, 0x76, 0x05, 0xe4, 0x36, 0x62, 0x8e, 
0xa3, 0x8d, 0x8b, 0x88, 0xe7, + 0x78, 0x91, 0xb8, 0x28, 0xc6, 0x81, 0x71, 0x19, 0x14, 0x0b, 0xac, 0xfd, 0xa5, 0x22, 0x9c, 0xcc, + 0x7d, 0xd4, 0x4b, 0x9b, 0xde, 0xf4, 0x3d, 0x37, 0xf2, 0x95, 0x13, 0x1e, 0x0f, 0x86, 0x4b, 0x5a, + 0xdb, 0x6b, 0x02, 0x8e, 0x15, 0x05, 0x3a, 0x07, 0xfd, 0x4c, 0x29, 0x9e, 0x4a, 0xd4, 0x3e, 0xbf, + 0xc8, 0xa3, 0x23, 0x72, 0xb4, 0x76, 0xaa, 0x17, 0x3b, 0x9e, 0xea, 0x8f, 0x52, 0x09, 0xc6, 0x6f, + 0x24, 0x0f, 0x14, 0xda, 0x5c, 0xdf, 0x6f, 0x60, 0x86, 0x44, 0x8f, 0x8b, 0xfe, 0x4a, 0x78, 0x9d, + 0x61, 0xa7, 0xee, 0x87, 0x5a, 0xa7, 0x3d, 0x09, 0x83, 0x3b, 0x64, 0x2f, 0x70, 0xbd, 0xad, 0xa4, + 0x37, 0xe2, 0x15, 0x0e, 0xc6, 0x12, 0x6f, 0xe6, 0x0c, 0x1e, 0x3c, 0x8c, 0x9c, 0xc1, 0xfa, 0x0c, + 0x18, 0xea, 0x2a, 0x9e, 0xfc, 0x50, 0x11, 0xc6, 0xf1, 0xfc, 0xe2, 0x7b, 0x03, 0x71, 0x2d, 0x3d, + 0x10, 0x87, 0x91, 0x5a, 0xf7, 0x60, 0xa3, 0xf1, 0x4b, 0x16, 0x8c, 0xb3, 0xbc, 0x72, 0x22, 0x22, + 0x87, 0xeb, 0x7b, 0x47, 0x70, 0x15, 0x78, 0x14, 0xfa, 0x03, 0x5a, 0x69, 0x32, 0x43, 0x3b, 0x6b, + 0x09, 0xe6, 0x38, 0x74, 0x0a, 0xfa, 0x58, 0x13, 0xe8, 0xe0, 0x8d, 0xf0, 0x2d, 0x78, 0xd1, 0x89, + 0x1c, 0xcc, 0xa0, 0x2c, 0x36, 0x20, 0x26, 0xad, 0x86, 0xcb, 0x1b, 0x1d, 0xbb, 0x2c, 0xbc, 0x3b, + 0xc2, 0x7d, 0x64, 0x36, 0xed, 0x9d, 0xc5, 0x06, 0xcc, 0x66, 0xd9, 0xf9, 0x9a, 0xfd, 0x17, 0x05, + 0x38, 0x93, 0x59, 0xae, 0xe7, 0xd8, 0x80, 0x9d, 0x4b, 0x3f, 0xc8, 0x14, 0x59, 0xc5, 0x23, 0xf4, + 0xf5, 0xee, 0xeb, 0x55, 0xfa, 0xef, 0xef, 0x21, 0x64, 0x5f, 0x66, 0x97, 0xbd, 0x4b, 0x42, 0xf6, + 0x65, 0xb6, 0x2d, 0x47, 0x4d, 0xf0, 0x37, 0x85, 0x9c, 0x6f, 0x61, 0x0a, 0x83, 0xf3, 0x74, 0x9f, + 0x61, 0xc8, 0x50, 0x5e, 0xc2, 0xf9, 0x1e, 0xc3, 0x61, 0x58, 0x61, 0xd1, 0x1c, 0x8c, 0x37, 0x5d, + 0x8f, 0x6e, 0x3e, 0x7b, 0xa6, 0x28, 0xae, 0x6c, 0x19, 0x6b, 0x26, 0x1a, 0x27, 0xe9, 0x91, 0xab, + 0x85, 0xf3, 0xe3, 0x5f, 0xf7, 0xca, 0x81, 0x56, 0xdd, 0xac, 0xe9, 0xce, 0xa1, 0x7a, 0x31, 0x23, + 0xb4, 0xdf, 0x9a, 0xa6, 0x27, 0x2a, 0xf6, 0xae, 0x27, 0x1a, 0xc9, 0xd6, 0x11, 0xcd, 0xbc, 0x02, + 0xa3, 0xf7, 0x6d, 0x1b, 0xb1, 0xbf, 0x51, 0x84, 0x87, 0x3b, 0x2c, 0x7b, 0xbe, 0xd7, 0x1b, 0x63, + 0xa0, 0xed, 0xf5, 0xa9, 0x71, 0xa8, 0xc0, 0xb1, 0xcd, 0x76, 0xa3, 0xb1, 0xc7, 0x1e, 0x35, 0x91, + 0xba, 0xa4, 0x10, 0x32, 0xa5, 0x54, 0x8e, 0x1c, 0x5b, 0xce, 0xa0, 0xc1, 0x99, 0x25, 0xe9, 0x15, + 0x8b, 0x9e, 0x24, 0x7b, 0x8a, 0x55, 0xe2, 0x8a, 0x85, 0x75, 0x24, 0x36, 0x69, 0xd1, 0x25, 0x98, + 0x74, 0x76, 0x1d, 0x97, 0xe7, 0x44, 0x90, 0x0c, 0xf8, 0x1d, 0x4b, 0xe9, 0xa2, 0xe7, 0x92, 0x04, + 0x38, 0x5d, 0x06, 0xbd, 0x0e, 0xc8, 0xbf, 0xc9, 0x1e, 0x4a, 0xd4, 0x2f, 0x11, 0x4f, 0x58, 0xdd, + 0xd9, 0xd8, 0x15, 0xe3, 0x2d, 0xe1, 0x6a, 0x8a, 0x02, 0x67, 0x94, 0x4a, 0x04, 0x96, 0x1b, 0xc8, + 0x0f, 0x2c, 0xd7, 0x79, 0x5f, 0xec, 0x9a, 0x9d, 0xed, 0x22, 0x8c, 0x1e, 0xd0, 0xfd, 0xd7, 0xfe, + 0xb7, 0x16, 0x28, 0x05, 0xb1, 0x19, 0x18, 0xfa, 0x15, 0xe6, 0x9f, 0xcc, 0x55, 0xdb, 0x5a, 0x2c, + 0xa8, 0xe3, 0x9a, 0x7f, 0x72, 0x8c, 0xc4, 0x26, 0x2d, 0x9f, 0x43, 0x9a, 0x5f, 0xb1, 0x71, 0x2b, + 0x10, 0x71, 0x2b, 0x15, 0x05, 0xfa, 0x18, 0x0c, 0xd6, 0xdd, 0x5d, 0x37, 0x14, 0xca, 0xb1, 0x03, + 0x1b, 0xe3, 0xe2, 0xad, 0x73, 0x91, 0xb3, 0xc1, 0x92, 0x9f, 0xfd, 0x43, 0x85, 0xb8, 0x4f, 0xde, + 0x68, 0xfb, 0x91, 0x73, 0x04, 0x27, 0xf9, 0x25, 0xe3, 0x24, 0x7f, 0x3c, 0x7b, 0xa0, 0xb5, 0x26, + 0xe5, 0x9e, 0xe0, 0x57, 0x13, 0x27, 0xf8, 0x13, 0xdd, 0x59, 0x75, 0x3e, 0xb9, 0xff, 0xa1, 0x05, + 0x93, 0x06, 0xfd, 0x11, 0x1c, 0x20, 0xcb, 0xe6, 0x01, 0xf2, 0x48, 0xd7, 0x6f, 0xc8, 0x39, 0x38, + 0xbe, 0xbf, 0x98, 0x68, 0x3b, 0x3b, 0x30, 0xde, 0x86, 0xbe, 0x6d, 0x27, 0xa8, 0x77, 0x4a, 
0x59, + 0x94, 0x2a, 0x34, 0x7b, 0xd9, 0x09, 0x84, 0xa7, 0xc2, 0x33, 0xb2, 0xd7, 0x29, 0xa8, 0xab, 0x97, + 0x02, 0xab, 0x0a, 0xbd, 0x04, 0x03, 0x61, 0xcd, 0x6f, 0xa9, 0x37, 0x53, 0x2c, 0xe5, 0x6f, 0x95, + 0x41, 0xee, 0xed, 0x97, 0x91, 0x59, 0x1d, 0x05, 0x63, 0x41, 0x8f, 0xde, 0x84, 0x51, 0xf6, 0x4b, + 0xb9, 0x0d, 0x16, 0xf3, 0x35, 0x18, 0x55, 0x9d, 0x90, 0xfb, 0xd4, 0x1a, 0x20, 0x6c, 0xb2, 0x9a, + 0xd9, 0x82, 0x92, 0xfa, 0xac, 0x07, 0x6a, 0xed, 0xfe, 0x57, 0x45, 0x98, 0xca, 0x98, 0x73, 0x28, + 0x34, 0x46, 0xe2, 0x62, 0x8f, 0x53, 0xf5, 0x1d, 0x8e, 0x45, 0xc8, 0x2e, 0x50, 0x75, 0x31, 0xb7, + 0x7a, 0xae, 0xf4, 0x5a, 0x48, 0x92, 0x95, 0x52, 0x50, 0xf7, 0x4a, 0x69, 0x65, 0x47, 0xd6, 0xd5, + 0xb4, 0x22, 0xd5, 0xd2, 0x07, 0x3a, 0xa6, 0xbf, 0xd1, 0x07, 0xc7, 0xb2, 0xe2, 0x09, 0xa3, 0xcf, + 0x24, 0xf2, 0x88, 0xbf, 0xd0, 0xa9, 0x87, 0xf5, 0x92, 0x3c, 0xb9, 0xb8, 0x08, 0xe3, 0x39, 0x6b, + 0x66, 0x16, 0xef, 0xda, 0xcd, 0xa2, 0x4e, 0x16, 0x5e, 0x27, 0xe0, 0xf9, 0xdf, 0xe5, 0xf6, 0xf1, + 0x81, 0x9e, 0x1b, 0x20, 0x12, 0xc7, 0x87, 0x09, 0x97, 0x24, 0x09, 0xee, 0xee, 0x92, 0x24, 0x6b, + 0x46, 0x2b, 0x30, 0x50, 0xe3, 0xbe, 0x2e, 0xc5, 0xee, 0x5b, 0x18, 0x77, 0x74, 0x51, 0x1b, 0xb0, + 0x70, 0x70, 0x11, 0x0c, 0x66, 0x5c, 0x18, 0xd6, 0x3a, 0xe6, 0x81, 0x4e, 0x9e, 0x1d, 0x7a, 0xf0, + 0x69, 0x5d, 0xf0, 0x40, 0x27, 0xd0, 0x8f, 0x5a, 0x90, 0x78, 0xf0, 0xa2, 0x94, 0x72, 0x56, 0xae, + 0x52, 0xee, 0x2c, 0xf4, 0x05, 0x7e, 0x83, 0x24, 0x93, 0x54, 0x63, 0xbf, 0x41, 0x30, 0xc3, 0x50, + 0x8a, 0x28, 0x56, 0xb5, 0x8c, 0xe8, 0xd7, 0x48, 0x71, 0x41, 0x7c, 0x14, 0xfa, 0x1b, 0x64, 0x97, + 0x34, 0x92, 0xb9, 0x04, 0x57, 0x29, 0x10, 0x73, 0x9c, 0xfd, 0x4b, 0x7d, 0x70, 0xba, 0x63, 0xac, + 0x2b, 0x7a, 0x19, 0xdb, 0x72, 0x22, 0x72, 0xcb, 0xd9, 0x4b, 0x26, 0xfd, 0xba, 0xc4, 0xc1, 0x58, + 0xe2, 0xd9, 0xf3, 0x4f, 0x9e, 0xbb, 0x23, 0xa1, 0xc2, 0x14, 0x29, 0x3b, 0x04, 0xd6, 0x54, 0x89, + 0x15, 0x0f, 0x43, 0x25, 0xf6, 0x1c, 0x40, 0x18, 0x36, 0xb8, 0x5b, 0x60, 0x5d, 0xbc, 0x2b, 0x8d, + 0x73, 0xbc, 0x54, 0x57, 0x05, 0x06, 0x6b, 0x54, 0x68, 0x11, 0x26, 0x5a, 0x81, 0x1f, 0x71, 0x8d, + 0xf0, 0x22, 0xf7, 0x9c, 0xed, 0x37, 0xc3, 0x0c, 0x55, 0x12, 0x78, 0x9c, 0x2a, 0x81, 0x5e, 0x84, + 0x61, 0x11, 0x7a, 0xa8, 0xe2, 0xfb, 0x0d, 0xa1, 0x84, 0x52, 0xce, 0xa4, 0xd5, 0x18, 0x85, 0x75, + 0x3a, 0xad, 0x18, 0x53, 0x33, 0x0f, 0x66, 0x16, 0xe3, 0xaa, 0x66, 0x8d, 0x2e, 0x11, 0xa6, 0x7c, + 0xa8, 0xa7, 0x30, 0xe5, 0xb1, 0x5a, 0xae, 0xd4, 0xb3, 0xd5, 0x13, 0xba, 0x2a, 0xb2, 0xbe, 0xda, + 0x07, 0x53, 0x62, 0xe2, 0x3c, 0xe8, 0xe9, 0x72, 0x2d, 0x3d, 0x5d, 0x0e, 0x43, 0x71, 0xf7, 0xde, + 0x9c, 0x39, 0xea, 0x39, 0xf3, 0xc3, 0x16, 0x98, 0x92, 0x1a, 0xfa, 0xdf, 0x73, 0xb3, 0x26, 0xbe, + 0x98, 0x2b, 0xf9, 0xc5, 0x31, 0x8c, 0xdf, 0x59, 0xfe, 0x44, 0xfb, 0x5f, 0x5b, 0xf0, 0x48, 0x57, + 0x8e, 0x68, 0x09, 0x4a, 0x4c, 0x9c, 0xd4, 0x2e, 0x7a, 0x4f, 0x28, 0xcf, 0x7a, 0x89, 0xc8, 0x91, + 0x6e, 0xe3, 0x92, 0x68, 0x29, 0x95, 0x9e, 0xf2, 0xc9, 0x8c, 0xf4, 0x94, 0xc7, 0x8d, 0xee, 0xb9, + 0xcf, 0xfc, 0x94, 0x5f, 0xa4, 0x27, 0x8e, 0xf1, 0xaa, 0x0d, 0x7d, 0xc0, 0x50, 0x3a, 0xda, 0x09, + 0xa5, 0x23, 0x32, 0xa9, 0xb5, 0x33, 0xe4, 0x23, 0x30, 0xc1, 0x62, 0x12, 0xb2, 0x77, 0x1e, 0xe2, + 0xbd, 0x5d, 0x21, 0xf6, 0xe5, 0x5e, 0x4d, 0xe0, 0x70, 0x8a, 0xda, 0xfe, 0xb3, 0x22, 0x0c, 0xf0, + 0xe5, 0x77, 0x04, 0xd7, 0xcb, 0xa7, 0xa1, 0xe4, 0x36, 0x9b, 0x6d, 0x9e, 0x71, 0xb0, 0x3f, 0xf6, + 0x0c, 0x5e, 0x91, 0x40, 0x1c, 0xe3, 0xd1, 0xb2, 0xd0, 0x77, 0x77, 0x08, 0x7b, 0xcc, 0x1b, 0x3e, + 0xbb, 0xe8, 0x44, 0x0e, 0x97, 0x95, 0xd4, 0x39, 0x1b, 0x6b, 0xc6, 0xd1, 0x27, 0x01, 0xc2, 0x28, + 0x70, 0xbd, 0x2d, 
0x0a, 0x13, 0xb1, 0xf1, 0x9f, 0xea, 0xc0, 0xad, 0xaa, 0x88, 0x39, 0xcf, 0x78, + 0xcf, 0x51, 0x08, 0xac, 0x71, 0x44, 0xb3, 0xc6, 0x49, 0x3f, 0x93, 0x18, 0x3b, 0xe0, 0x5c, 0xe3, + 0x31, 0x9b, 0xf9, 0x20, 0x94, 0x14, 0xf3, 0x6e, 0xda, 0xaf, 0x11, 0x5d, 0x2c, 0xfa, 0x30, 0x8c, + 0x27, 0xda, 0x76, 0x20, 0xe5, 0xd9, 0x2f, 0x5b, 0x30, 0xce, 0x1b, 0xb3, 0xe4, 0xed, 0x8a, 0xd3, + 0xe0, 0x0e, 0x1c, 0x6b, 0x64, 0xec, 0xca, 0x62, 0xf8, 0x7b, 0xdf, 0xc5, 0x95, 0xb2, 0x2c, 0x0b, + 0x8b, 0x33, 0xeb, 0x40, 0xe7, 0xe9, 0x8a, 0xa3, 0xbb, 0xae, 0xd3, 0x10, 0xf1, 0x0d, 0x46, 0xf8, + 0x6a, 0xe3, 0x30, 0xac, 0xb0, 0xf6, 0x1f, 0x59, 0x30, 0xc9, 0x5b, 0x7e, 0x85, 0xec, 0xa9, 0xbd, + 0xe9, 0x5b, 0xd9, 0x76, 0x91, 0xeb, 0xb6, 0x90, 0x93, 0xeb, 0x56, 0xff, 0xb4, 0x62, 0xc7, 0x4f, + 0xfb, 0x8a, 0x05, 0x62, 0x86, 0x1c, 0x81, 0x3e, 0xe3, 0xbb, 0x4c, 0x7d, 0xc6, 0x4c, 0xfe, 0x22, + 0xc8, 0x51, 0x64, 0xfc, 0xb5, 0x05, 0x13, 0x9c, 0x20, 0xb6, 0xd5, 0x7f, 0x4b, 0xc7, 0x61, 0xde, + 0xfc, 0xa2, 0x4c, 0xe7, 0xcb, 0x2b, 0x64, 0x6f, 0xc3, 0xaf, 0x38, 0xd1, 0x76, 0xf6, 0x47, 0x19, + 0x83, 0xd5, 0xd7, 0x71, 0xb0, 0xea, 0x72, 0x01, 0x19, 0xa9, 0xe0, 0xba, 0x04, 0x08, 0x38, 0x68, + 0x2a, 0x38, 0xfb, 0xcf, 0x2d, 0x40, 0xbc, 0x1a, 0x43, 0x70, 0xa3, 0xe2, 0x10, 0x83, 0x6a, 0x07, + 0x5d, 0xbc, 0x35, 0x29, 0x0c, 0xd6, 0xa8, 0x0e, 0xa5, 0x7b, 0x12, 0x0e, 0x17, 0xc5, 0xee, 0x0e, + 0x17, 0x07, 0xe8, 0xd1, 0x7f, 0x3e, 0x00, 0xc9, 0x97, 0x7d, 0xe8, 0x3a, 0x8c, 0xd4, 0x9c, 0x96, + 0x73, 0xd3, 0x6d, 0xb8, 0x91, 0x4b, 0xc2, 0x4e, 0xde, 0x58, 0x0b, 0x1a, 0x9d, 0x30, 0x91, 0x6b, + 0x10, 0x6c, 0xf0, 0x41, 0xb3, 0x00, 0xad, 0xc0, 0xdd, 0x75, 0x1b, 0x64, 0x8b, 0xa9, 0x5d, 0x58, + 0x44, 0x15, 0xee, 0x1a, 0x26, 0xa1, 0x58, 0xa3, 0xc8, 0x08, 0xa3, 0x50, 0x7c, 0xc0, 0x61, 0x14, + 0xe0, 0xc8, 0xc2, 0x28, 0xf4, 0x1d, 0x28, 0x8c, 0xc2, 0xd0, 0x81, 0xc3, 0x28, 0xf4, 0xf7, 0x14, + 0x46, 0x01, 0xc3, 0x09, 0x29, 0x7b, 0xd2, 0xff, 0xcb, 0x6e, 0x83, 0x88, 0x0b, 0x07, 0x0f, 0x03, + 0x33, 0x73, 0x77, 0xbf, 0x7c, 0x02, 0x67, 0x52, 0xe0, 0x9c, 0x92, 0xe8, 0xa3, 0x30, 0xed, 0x34, + 0x1a, 0xfe, 0x2d, 0x35, 0xa8, 0x4b, 0x61, 0xcd, 0x69, 0x70, 0x13, 0xc8, 0x20, 0xe3, 0x7a, 0xea, + 0xee, 0x7e, 0x79, 0x7a, 0x2e, 0x87, 0x06, 0xe7, 0x96, 0x46, 0xaf, 0x42, 0xa9, 0x15, 0xf8, 0xb5, + 0x35, 0xed, 0xf9, 0xf1, 0x19, 0xda, 0x81, 0x15, 0x09, 0xbc, 0xb7, 0x5f, 0x1e, 0x55, 0x7f, 0xd8, + 0x81, 0x1f, 0x17, 0xc8, 0x88, 0x8b, 0x30, 0x7c, 0xa8, 0x71, 0x11, 0x76, 0x60, 0xaa, 0x4a, 0x02, + 0xd7, 0x69, 0xb8, 0x77, 0xa8, 0xbc, 0x2c, 0xf7, 0xa7, 0x0d, 0x28, 0x05, 0x89, 0x1d, 0xb9, 0xa7, + 0x50, 0xc4, 0x5a, 0x36, 0x2e, 0xb9, 0x03, 0xc7, 0x8c, 0xec, 0xff, 0x66, 0xc1, 0xa0, 0x78, 0xc9, + 0x77, 0x04, 0x52, 0xe3, 0x9c, 0x61, 0x94, 0x28, 0x67, 0x77, 0x18, 0x6b, 0x4c, 0xae, 0x39, 0x62, + 0x25, 0x61, 0x8e, 0x78, 0xa4, 0x13, 0x93, 0xce, 0x86, 0x88, 0xff, 0xaf, 0x48, 0xa5, 0x77, 0xe3, + 0x4d, 0xf9, 0x83, 0xef, 0x82, 0x75, 0x18, 0x0c, 0xc5, 0x9b, 0xe6, 0x42, 0xfe, 0x6b, 0x90, 0xe4, + 0x20, 0xc6, 0x5e, 0x74, 0xe2, 0x15, 0xb3, 0x64, 0x92, 0xf9, 0x58, 0xba, 0xf8, 0x00, 0x1f, 0x4b, + 0x77, 0x7b, 0x75, 0xdf, 0x77, 0x18, 0xaf, 0xee, 0xed, 0xaf, 0xb3, 0x93, 0x53, 0x87, 0x1f, 0x81, + 0x50, 0x75, 0xc9, 0x3c, 0x63, 0xed, 0x0e, 0x33, 0x4b, 0x34, 0x2a, 0x47, 0xb8, 0xfa, 0x45, 0x0b, + 0x4e, 0x67, 0x7c, 0x95, 0x26, 0x69, 0x3d, 0x03, 0x43, 0x4e, 0xbb, 0xee, 0xaa, 0xb5, 0xac, 0x99, + 0x26, 0xe7, 0x04, 0x1c, 0x2b, 0x0a, 0xb4, 0x00, 0x93, 0xe4, 0x76, 0xcb, 0xe5, 0x86, 0x5c, 0xdd, + 0xf9, 0xb8, 0xc8, 0x9f, 0x7f, 0x2e, 0x25, 0x91, 0x38, 0x4d, 0xaf, 0x02, 0x44, 0x15, 0x73, 0x03, + 0x44, 0xfd, 0xbc, 0x05, 0xc3, 0xea, 0x55, 
0xef, 0x03, 0xef, 0xed, 0x8f, 0x98, 0xbd, 0xfd, 0x70, + 0x87, 0xde, 0xce, 0xe9, 0xe6, 0x3f, 0x28, 0xa8, 0xf6, 0x56, 0xfc, 0x20, 0xea, 0x41, 0x82, 0xbb, + 0xff, 0x87, 0x13, 0x17, 0x61, 0xd8, 0x69, 0xb5, 0x24, 0x42, 0x7a, 0xc0, 0xb1, 0xc0, 0xf2, 0x31, + 0x18, 0xeb, 0x34, 0xea, 0x1d, 0x47, 0x31, 0xf7, 0x1d, 0x47, 0x1d, 0x20, 0x72, 0x82, 0x2d, 0x12, + 0x51, 0x98, 0x70, 0xd8, 0xcd, 0xdf, 0x6f, 0xda, 0x91, 0xdb, 0x98, 0x75, 0xbd, 0x28, 0x8c, 0x82, + 0xd9, 0x15, 0x2f, 0xba, 0x1a, 0xf0, 0x2b, 0xa4, 0x16, 0x62, 0x4d, 0xf1, 0xc2, 0x1a, 0x5f, 0x19, + 0xc1, 0x82, 0xd5, 0xd1, 0x6f, 0xba, 0x52, 0xac, 0x0b, 0x38, 0x56, 0x14, 0xf6, 0x07, 0xd9, 0xe9, + 0xc3, 0xfa, 0xf4, 0x60, 0xe1, 0xc5, 0x7e, 0x72, 0x44, 0x8d, 0x06, 0x33, 0x8a, 0x2e, 0xea, 0x41, + 0xcc, 0x3a, 0x6f, 0xf6, 0xb4, 0x62, 0xfd, 0x45, 0x64, 0x1c, 0xe9, 0x0c, 0x7d, 0x3c, 0xe5, 0x1e, + 0xf3, 0x6c, 0x97, 0x53, 0xe3, 0x00, 0x0e, 0x31, 0x2c, 0xcb, 0x14, 0xcb, 0xc1, 0xb3, 0x52, 0x11, + 0xeb, 0x42, 0xcb, 0x32, 0x25, 0x10, 0x38, 0xa6, 0xa1, 0xc2, 0x94, 0xfa, 0x13, 0x4e, 0xa3, 0x38, + 0x18, 0xb1, 0xa2, 0x0e, 0xb1, 0x46, 0x81, 0x2e, 0x08, 0x85, 0x02, 0xb7, 0x0b, 0x3c, 0x9c, 0x50, + 0x28, 0xc8, 0xee, 0xd2, 0xb4, 0x40, 0x17, 0x61, 0x98, 0xdc, 0x8e, 0x48, 0xe0, 0x39, 0x0d, 0x5a, + 0x43, 0x7f, 0x1c, 0x3f, 0x73, 0x29, 0x06, 0x63, 0x9d, 0x06, 0x6d, 0xc0, 0x78, 0xc8, 0xf5, 0x6c, + 0x2a, 0x04, 0x3e, 0xd7, 0x57, 0x3e, 0xa5, 0xde, 0x53, 0x9b, 0xe8, 0x7b, 0x0c, 0xc4, 0x77, 0x27, + 0x19, 0x65, 0x22, 0xc9, 0x02, 0xbd, 0x06, 0x63, 0x0d, 0xdf, 0xa9, 0xcf, 0x3b, 0x0d, 0xc7, 0xab, + 0xb1, 0xfe, 0x19, 0x32, 0x73, 0x95, 0xaf, 0x1a, 0x58, 0x9c, 0xa0, 0xa6, 0xc2, 0x9b, 0x0e, 0x11, + 0x61, 0xda, 0x1c, 0x6f, 0x8b, 0x84, 0xd3, 0x25, 0xf6, 0x55, 0x4c, 0x78, 0x5b, 0xcd, 0xa1, 0xc1, + 0xb9, 0xa5, 0xd1, 0x4b, 0x30, 0x22, 0x3f, 0x5f, 0x0b, 0xca, 0x12, 0x3f, 0x89, 0xd1, 0x70, 0xd8, + 0xa0, 0x44, 0x21, 0x1c, 0x97, 0xff, 0x37, 0x02, 0x67, 0x73, 0xd3, 0xad, 0x89, 0x48, 0x05, 0xfc, + 0xf9, 0xf0, 0x87, 0xe5, 0x5b, 0xc5, 0xa5, 0x2c, 0xa2, 0x7b, 0xfb, 0xe5, 0x53, 0xa2, 0xd7, 0x32, + 0xf1, 0x38, 0x9b, 0x37, 0x5a, 0x83, 0xa9, 0x6d, 0xe2, 0x34, 0xa2, 0xed, 0x85, 0x6d, 0x52, 0xdb, + 0x91, 0x0b, 0x8e, 0x85, 0x79, 0xd1, 0x9e, 0x8e, 0x5c, 0x4e, 0x93, 0xe0, 0xac, 0x72, 0xe8, 0x2d, + 0x98, 0x6e, 0xb5, 0x6f, 0x36, 0xdc, 0x70, 0x7b, 0xdd, 0x8f, 0x98, 0x13, 0xd2, 0x5c, 0xbd, 0x1e, + 0x90, 0x90, 0xbf, 0x2e, 0x65, 0x47, 0xaf, 0x0c, 0xa4, 0x53, 0xc9, 0xa1, 0xc3, 0xb9, 0x1c, 0xd0, + 0x1d, 0x38, 0x9e, 0x98, 0x08, 0x22, 0x22, 0xc6, 0x58, 0x7e, 0x02, 0x9c, 0x6a, 0x56, 0x01, 0x11, + 0x5c, 0x26, 0x0b, 0x85, 0xb3, 0xab, 0x40, 0x2f, 0x03, 0xb8, 0xad, 0x65, 0xa7, 0xe9, 0x36, 0xe8, + 0x55, 0x71, 0x8a, 0xcd, 0x11, 0x7a, 0x6d, 0x80, 0x95, 0x8a, 0x84, 0xd2, 0xbd, 0x59, 0xfc, 0xdb, + 0xc3, 0x1a, 0x35, 0x5a, 0x85, 0x31, 0xf1, 0x6f, 0x4f, 0x0c, 0xe9, 0xa4, 0xca, 0x95, 0x38, 0x26, + 0x4b, 0xa8, 0x71, 0x4c, 0x40, 0x70, 0xa2, 0x2c, 0xda, 0x82, 0xd3, 0x32, 0x51, 0xa3, 0x3e, 0x3f, + 0xe5, 0x18, 0x84, 0x2c, 0xeb, 0xcc, 0x10, 0x7f, 0x95, 0x32, 0xd7, 0x89, 0x10, 0x77, 0xe6, 0x43, + 0xcf, 0x75, 0x7d, 0x9a, 0xf3, 0x37, 0xc7, 0xc7, 0xe3, 0x88, 0x83, 0xab, 0x49, 0x24, 0x4e, 0xd3, + 0x23, 0x1f, 0x8e, 0xbb, 0x5e, 0xd6, 0xac, 0x3e, 0xc1, 0x18, 0x7d, 0x88, 0x3f, 0xb7, 0xee, 0x3c, + 0xa3, 0x33, 0xf1, 0x38, 0x9b, 0xef, 0x3b, 0xf3, 0xfb, 0xfb, 0x43, 0x8b, 0x96, 0xd6, 0xa4, 0x73, + 0xf4, 0x29, 0x18, 0xd1, 0x3f, 0x4a, 0x48, 0x1a, 0xe7, 0xb2, 0x85, 0x57, 0x6d, 0x4f, 0xe0, 0xb2, + 0xbd, 0x5a, 0xf7, 0x3a, 0x0e, 0x1b, 0x1c, 0x51, 0x2d, 0x23, 0xb6, 0xc1, 0x85, 0xde, 0x24, 0x99, + 0xde, 0xdd, 0xde, 0x08, 0x64, 0x4f, 0x77, 0xb4, 0x0a, 0x43, 0xb5, 
0x86, 0x4b, 0xbc, 0x68, 0xa5, + 0xd2, 0x29, 0x7a, 0xe3, 0x82, 0xa0, 0x11, 0xeb, 0x47, 0x24, 0x90, 0xe1, 0x30, 0xac, 0x38, 0xd8, + 0x2f, 0xc1, 0x70, 0xb5, 0x41, 0x48, 0x8b, 0x3f, 0xdf, 0x41, 0x4f, 0xb2, 0xdb, 0x04, 0x93, 0x07, + 0x2d, 0x26, 0x0f, 0xea, 0x17, 0x05, 0x26, 0x09, 0x4a, 0xbc, 0xfd, 0x5b, 0x05, 0x28, 0x77, 0xc9, + 0x63, 0x94, 0x30, 0x60, 0x59, 0x3d, 0x19, 0xb0, 0xe6, 0x60, 0x3c, 0xfe, 0xa7, 0xeb, 0xc6, 0x94, + 0x0f, 0xec, 0x75, 0x13, 0x8d, 0x93, 0xf4, 0x3d, 0x3f, 0x67, 0xd0, 0x6d, 0x60, 0x7d, 0x5d, 0x1f, + 0xe4, 0x18, 0xb6, 0xef, 0xfe, 0xde, 0x2f, 0xcc, 0xb9, 0x76, 0x4c, 0xfb, 0xeb, 0x05, 0x38, 0xae, + 0xba, 0xf0, 0x3b, 0xb7, 0xe3, 0xae, 0xa5, 0x3b, 0xee, 0x10, 0xac, 0xc0, 0xf6, 0x55, 0x18, 0xe0, + 0x81, 0x2c, 0x7b, 0x10, 0xd4, 0x1f, 0x35, 0xe3, 0x6b, 0x2b, 0xd9, 0xd0, 0x88, 0xb1, 0xfd, 0x03, + 0x16, 0x8c, 0x27, 0xde, 0xc5, 0x21, 0xac, 0x3d, 0x9e, 0xbe, 0x1f, 0x61, 0x3a, 0x4b, 0x4c, 0x3f, + 0x0b, 0x7d, 0xdb, 0x7e, 0x18, 0x25, 0x5d, 0x44, 0x2e, 0xfb, 0x61, 0x84, 0x19, 0xc6, 0xfe, 0x63, + 0x0b, 0xfa, 0x37, 0x1c, 0xd7, 0x8b, 0xa4, 0x39, 0xc1, 0xca, 0x31, 0x27, 0xf4, 0xf2, 0x5d, 0xe8, + 0x45, 0x18, 0x20, 0x9b, 0x9b, 0xa4, 0x16, 0x89, 0x51, 0x95, 0x41, 0x14, 0x06, 0x96, 0x18, 0x94, + 0x4a, 0x8e, 0xac, 0x32, 0xfe, 0x17, 0x0b, 0x62, 0x74, 0x03, 0x4a, 0x91, 0xdb, 0x24, 0x73, 0xf5, + 0xba, 0x30, 0xb2, 0xdf, 0x47, 0xe4, 0x8f, 0x0d, 0xc9, 0x00, 0xc7, 0xbc, 0xec, 0x2f, 0x15, 0x00, + 0xe2, 0x08, 0x60, 0xdd, 0x3e, 0x71, 0x3e, 0x65, 0x7e, 0x3d, 0x97, 0x61, 0x7e, 0x45, 0x31, 0xc3, + 0x0c, 0xdb, 0xab, 0xea, 0xa6, 0x62, 0x4f, 0xdd, 0xd4, 0x77, 0x90, 0x6e, 0x5a, 0x80, 0xc9, 0x38, + 0x82, 0x99, 0x19, 0xc0, 0x91, 0x1d, 0xba, 0x1b, 0x49, 0x24, 0x4e, 0xd3, 0xdb, 0x04, 0xce, 0xaa, + 0x40, 0x4e, 0xe2, 0x2c, 0x64, 0x1e, 0xe4, 0xba, 0x39, 0xbb, 0x4b, 0x3f, 0xc5, 0xf6, 0xe5, 0x42, + 0xae, 0x7d, 0xf9, 0x27, 0x2c, 0x38, 0x96, 0xac, 0x87, 0x3d, 0xb7, 0xfe, 0x82, 0x05, 0xc7, 0xe3, + 0x34, 0x1e, 0x69, 0x9b, 0xfe, 0x0b, 0x1d, 0x83, 0x53, 0xe5, 0xb4, 0x38, 0x8e, 0xd6, 0xb1, 0x96, + 0xc5, 0x1a, 0x67, 0xd7, 0x68, 0xff, 0xd7, 0x3e, 0x98, 0xce, 0x8b, 0x6a, 0xc5, 0x1e, 0x98, 0x38, + 0xb7, 0xab, 0x3b, 0xe4, 0x96, 0x70, 0xe3, 0x8f, 0x1f, 0x98, 0x70, 0x30, 0x96, 0xf8, 0x64, 0xe6, + 0x96, 0x42, 0x8f, 0x99, 0x5b, 0xb6, 0x61, 0xf2, 0xd6, 0x36, 0xf1, 0xae, 0x79, 0xa1, 0x13, 0xb9, + 0xe1, 0xa6, 0xcb, 0x2c, 0xd2, 0x7c, 0xde, 0xc8, 0xec, 0xe3, 0x93, 0x37, 0x92, 0x04, 0xf7, 0xf6, + 0xcb, 0xa7, 0x0d, 0x40, 0xdc, 0x64, 0xbe, 0x91, 0xe0, 0x34, 0xd3, 0x74, 0xe2, 0x9b, 0xbe, 0x07, + 0x9c, 0xf8, 0xa6, 0xe9, 0x0a, 0x3f, 0x16, 0xf9, 0x7a, 0x80, 0xdd, 0x35, 0xd7, 0x14, 0x14, 0x6b, + 0x14, 0xe8, 0x13, 0x80, 0xf4, 0xcc, 0x65, 0x46, 0x50, 0xd1, 0x67, 0xef, 0xee, 0x97, 0xd1, 0x7a, + 0x0a, 0x7b, 0x6f, 0xbf, 0x3c, 0x45, 0xa1, 0x2b, 0x1e, 0xbd, 0xb3, 0xc6, 0x91, 0xd8, 0x32, 0x18, + 0xa1, 0x1b, 0x30, 0x41, 0xa1, 0x6c, 0x45, 0xc9, 0x88, 0xa5, 0xfc, 0x9e, 0xf9, 0xf4, 0xdd, 0xfd, + 0xf2, 0xc4, 0x7a, 0x02, 0x97, 0xc7, 0x3a, 0xc5, 0x24, 0x23, 0xff, 0xcd, 0x50, 0xaf, 0xf9, 0x6f, + 0xec, 0x2f, 0x58, 0x70, 0x92, 0x1e, 0x70, 0xf5, 0xd5, 0x1c, 0xb3, 0xb4, 0xd3, 0x72, 0xb9, 0xe1, + 0x43, 0x1c, 0x35, 0x4c, 0xc1, 0x56, 0x59, 0xe1, 0x66, 0x0f, 0x85, 0xa5, 0x3b, 0xfc, 0x8e, 0xeb, + 0xd5, 0x93, 0x3b, 0xfc, 0x15, 0xd7, 0xab, 0x63, 0x86, 0x51, 0x47, 0x56, 0x31, 0xf7, 0x11, 0xc3, + 0x57, 0xe9, 0x5a, 0xa5, 0x6d, 0xf9, 0x96, 0x36, 0x03, 0x3d, 0xad, 0x1b, 0x29, 0x85, 0x3f, 0x62, + 0xae, 0x81, 0xf2, 0xf3, 0x16, 0x88, 0x47, 0xcf, 0x3d, 0x9c, 0xc9, 0x6f, 0xc2, 0xc8, 0x6e, 0x3a, + 0xab, 0xe3, 0xd9, 0xfc, 0x57, 0xe0, 0x22, 0x56, 0xbb, 0x12, 0xd1, 0x8d, 0x0c, 0x8e, 0x06, 
0x2f, + 0xbb, 0x0e, 0x02, 0xbb, 0x48, 0x98, 0x29, 0xa2, 0x7b, 0x6b, 0x9e, 0x03, 0xa8, 0x33, 0x5a, 0x96, + 0xea, 0xb9, 0x60, 0x4a, 0x5c, 0x8b, 0x0a, 0x83, 0x35, 0x2a, 0xfb, 0x77, 0x0b, 0x30, 0x2c, 0xb3, + 0x08, 0xb6, 0xbd, 0x5e, 0x14, 0x86, 0x07, 0x4a, 0x2b, 0x8e, 0x2e, 0x40, 0x89, 0x69, 0xb4, 0x2b, + 0xb1, 0x9e, 0x55, 0xe9, 0x93, 0xd6, 0x24, 0x02, 0xc7, 0x34, 0x4c, 0x7c, 0x6f, 0xdf, 0x64, 0xe4, + 0x89, 0x27, 0xba, 0x55, 0x0e, 0xc6, 0x12, 0x8f, 0x3e, 0x0a, 0x13, 0xbc, 0x5c, 0xe0, 0xb7, 0x9c, + 0x2d, 0x6e, 0x05, 0xeb, 0x57, 0x71, 0x4f, 0x26, 0xd6, 0x12, 0xb8, 0x7b, 0xfb, 0xe5, 0x63, 0x49, + 0x18, 0x33, 0xef, 0xa6, 0xb8, 0x30, 0x67, 0x37, 0x5e, 0x09, 0xdd, 0xd5, 0x53, 0x3e, 0x72, 0x31, + 0x0a, 0xeb, 0x74, 0xf6, 0xa7, 0x00, 0xa5, 0xf3, 0x29, 0xa2, 0xd7, 0xb9, 0xb3, 0xb4, 0x1b, 0x90, + 0x7a, 0x27, 0x73, 0xaf, 0x1e, 0xdd, 0x43, 0xbe, 0xae, 0xe3, 0xa5, 0xb0, 0x2a, 0x6f, 0xff, 0x60, + 0x1f, 0x4c, 0x24, 0xe3, 0x09, 0xa0, 0xcb, 0x30, 0xc0, 0x45, 0x4a, 0xc1, 0xbe, 0x83, 0x37, 0x91, + 0x16, 0x85, 0x80, 0x1d, 0xae, 0x42, 0x2a, 0x15, 0xe5, 0xd1, 0x5b, 0x30, 0x5c, 0xf7, 0x6f, 0x79, + 0xb7, 0x9c, 0xa0, 0x3e, 0x57, 0x59, 0x11, 0xd3, 0x39, 0x53, 0xc5, 0xb1, 0x18, 0x93, 0xe9, 0x91, + 0x0d, 0x98, 0xe5, 0x3c, 0x46, 0x61, 0x9d, 0x1d, 0xda, 0x60, 0x29, 0x42, 0x36, 0xdd, 0xad, 0x35, + 0xa7, 0xd5, 0xe9, 0xe5, 0xcc, 0x82, 0x24, 0xd2, 0x38, 0x8f, 0x8a, 0x3c, 0x22, 0x1c, 0x81, 0x63, + 0x46, 0xe8, 0x33, 0x30, 0x15, 0xe6, 0x18, 0x5d, 0xf2, 0xd2, 0xeb, 0x76, 0xb2, 0x43, 0xcc, 0x3f, + 0x74, 0x77, 0xbf, 0x3c, 0x95, 0x65, 0x9e, 0xc9, 0xaa, 0x06, 0xdd, 0x06, 0x24, 0x94, 0x9b, 0x1b, + 0x41, 0x3b, 0x8c, 0xe6, 0xdb, 0x5e, 0xbd, 0x21, 0x53, 0x88, 0x64, 0x27, 0xe0, 0x4e, 0x51, 0x6b, + 0x75, 0xb3, 0xf8, 0xa2, 0x69, 0x0a, 0x9c, 0x51, 0x87, 0xfd, 0xf9, 0x3e, 0x98, 0x91, 0x09, 0x4c, + 0x33, 0x5e, 0x08, 0x7c, 0xce, 0x4a, 0x3c, 0x11, 0x78, 0x39, 0x7f, 0x57, 0x7a, 0x60, 0x0f, 0x05, + 0xbe, 0x98, 0x7e, 0x28, 0xf0, 0xea, 0x01, 0x9b, 0x71, 0x68, 0xcf, 0x05, 0xbe, 0x63, 0x7d, 0xfc, + 0xbf, 0x7c, 0x0c, 0x8c, 0x73, 0xc4, 0x48, 0xf8, 0x6f, 0x1d, 0x52, 0xc2, 0x7f, 0x0c, 0x43, 0xa4, + 0xd9, 0x8a, 0xf6, 0x16, 0xdd, 0x40, 0xb4, 0x38, 0x93, 0xe7, 0x92, 0xa0, 0x49, 0xf3, 0x94, 0x18, + 0xac, 0xf8, 0xa0, 0x5d, 0x98, 0xdc, 0xaa, 0x91, 0x44, 0xce, 0xef, 0x62, 0xfe, 0xba, 0xbd, 0xb4, + 0xb0, 0xd4, 0x21, 0xe1, 0x37, 0xbb, 0xa9, 0xa4, 0x48, 0x70, 0xba, 0x0a, 0x96, 0x6f, 0xdc, 0xb9, + 0x15, 0x2e, 0x35, 0x9c, 0x30, 0x72, 0x6b, 0xf3, 0x0d, 0xbf, 0xb6, 0x53, 0x8d, 0xfc, 0x40, 0x26, + 0x1c, 0xcb, 0xbc, 0x28, 0xcc, 0xdd, 0xa8, 0xa6, 0xe8, 0xd3, 0xf9, 0xc6, 0xb3, 0xa8, 0x70, 0x66, + 0x5d, 0x68, 0x1d, 0x06, 0xb7, 0xdc, 0x08, 0x93, 0x96, 0x2f, 0x76, 0x8b, 0xcc, 0xad, 0xf0, 0x12, + 0x27, 0x49, 0xe7, 0xff, 0x16, 0x08, 0x2c, 0x99, 0xa0, 0xd7, 0xd5, 0x21, 0x30, 0x90, 0xaf, 0x2d, + 0x4c, 0x7b, 0x5e, 0x65, 0x1e, 0x03, 0xaf, 0x41, 0xd1, 0xdb, 0x0c, 0x3b, 0xc5, 0x0b, 0x59, 0x5f, + 0xae, 0xa6, 0xf3, 0x72, 0xaf, 0x2f, 0x57, 0x31, 0x2d, 0xc8, 0x9e, 0x16, 0x86, 0xb5, 0xd0, 0x15, + 0xa9, 0x53, 0x32, 0x5f, 0x5a, 0xae, 0x54, 0x17, 0xaa, 0x2b, 0xe9, 0x5c, 0xe4, 0x0c, 0x8c, 0x79, + 0x71, 0x74, 0x1d, 0x4a, 0x5b, 0x7c, 0xe3, 0xdb, 0x0c, 0x45, 0x12, 0xe3, 0xcc, 0xc3, 0xe8, 0x92, + 0x24, 0x4a, 0x67, 0x20, 0x57, 0x28, 0x1c, 0xb3, 0x42, 0x9f, 0xb7, 0xe0, 0x78, 0x32, 0x0b, 0x34, + 0x7b, 0x10, 0x24, 0x9c, 0x94, 0x5e, 0xec, 0x25, 0x2d, 0x37, 0x2b, 0x60, 0x54, 0xc8, 0x14, 0xfc, + 0x99, 0x64, 0x38, 0xbb, 0x3a, 0xda, 0xd1, 0xc1, 0xcd, 0xba, 0x70, 0x96, 0xc9, 0xec, 0xe8, 0x44, + 0xf0, 0x14, 0xde, 0xd1, 0x78, 0x7e, 0x11, 0xd3, 0x82, 0x68, 0x03, 0x60, 0xb3, 0x41, 0x64, 0xc2, + 0xfa, 0x91, 0xfc, 
0xd3, 0x7f, 0x59, 0x51, 0xc9, 0x6c, 0x41, 0x54, 0x26, 0x8c, 0xa1, 0x58, 0xe3, + 0x43, 0xa7, 0x52, 0xcd, 0xf5, 0xea, 0x24, 0x60, 0xe6, 0x93, 0x9c, 0xa9, 0xb4, 0xc0, 0x28, 0xd2, + 0x53, 0x89, 0xc3, 0xb1, 0xe0, 0xc0, 0x78, 0x91, 0xd6, 0xf6, 0x66, 0xd8, 0x29, 0x2c, 0xfe, 0x02, + 0x69, 0x6d, 0x27, 0x26, 0x14, 0xe7, 0xc5, 0xe0, 0x58, 0x70, 0xa0, 0x4b, 0x66, 0x93, 0x2e, 0x20, + 0x12, 0x4c, 0x8f, 0xe7, 0x2f, 0x99, 0x65, 0x4e, 0x92, 0x5e, 0x32, 0x02, 0x81, 0x25, 0x13, 0xf4, + 0x49, 0x53, 0xda, 0x99, 0x60, 0x3c, 0x9f, 0xee, 0x22, 0xed, 0x18, 0x7c, 0x3b, 0xcb, 0x3b, 0x2f, + 0x43, 0x61, 0xb3, 0xc6, 0xcc, 0x2e, 0x39, 0x0a, 0xee, 0xe5, 0x05, 0x83, 0x1b, 0x0b, 0x33, 0xbd, + 0xbc, 0x80, 0x0b, 0x9b, 0x35, 0x3a, 0xf5, 0x9d, 0x3b, 0xed, 0x80, 0x2c, 0xbb, 0x0d, 0x22, 0x42, + 0xe4, 0x67, 0x4e, 0xfd, 0x39, 0x49, 0x94, 0x9e, 0xfa, 0x0a, 0x85, 0x63, 0x56, 0x94, 0x6f, 0x2c, + 0x83, 0x4d, 0xe5, 0xf3, 0x55, 0xa2, 0x56, 0x9a, 0x6f, 0xa6, 0x14, 0xb6, 0x03, 0xa3, 0xbb, 0x61, + 0x6b, 0x9b, 0xc8, 0x5d, 0x91, 0x19, 0x84, 0x72, 0x5e, 0xd3, 0x5f, 0x17, 0x84, 0x6e, 0x10, 0xb5, + 0x9d, 0x46, 0x6a, 0x23, 0x67, 0x7a, 0x80, 0xeb, 0x3a, 0x33, 0x6c, 0xf2, 0xa6, 0x13, 0xe1, 0x6d, + 0x1e, 0xf2, 0x8a, 0x99, 0x86, 0x72, 0x26, 0x42, 0x46, 0x54, 0x2c, 0x3e, 0x11, 0x04, 0x02, 0x4b, + 0x26, 0xaa, 0xb3, 0xd9, 0x01, 0x74, 0xa2, 0x4b, 0x67, 0xa7, 0xda, 0x1b, 0x77, 0x36, 0x3b, 0x70, + 0x62, 0x56, 0xec, 0xa0, 0x69, 0x65, 0x24, 0xcc, 0x9e, 0x7e, 0x28, 0xff, 0xa0, 0xe9, 0x96, 0x60, + 0x9b, 0x1f, 0x34, 0x59, 0x54, 0x38, 0xb3, 0x2e, 0xfa, 0x71, 0x2d, 0x19, 0xbd, 0x4c, 0x84, 0xf1, + 0x7f, 0x32, 0x27, 0xf8, 0x5f, 0x3a, 0xc4, 0x19, 0xff, 0x38, 0x85, 0xc2, 0x31, 0x2b, 0x54, 0x87, + 0xb1, 0x96, 0x11, 0x15, 0x93, 0xa5, 0x23, 0xc8, 0x91, 0x0b, 0xb2, 0xe2, 0x67, 0x72, 0x75, 0x86, + 0x89, 0xc1, 0x09, 0x9e, 0xcc, 0x37, 0x8c, 0x3f, 0xf4, 0x62, 0xd9, 0x0a, 0x72, 0x86, 0x3a, 0xe3, + 0x2d, 0x18, 0x1f, 0x6a, 0x81, 0xc0, 0x92, 0x09, 0xed, 0x0d, 0xf1, 0x3c, 0xc9, 0x0f, 0x59, 0xd2, + 0x8f, 0x3c, 0x13, 0x6e, 0x96, 0x4d, 0x43, 0x86, 0x82, 0x16, 0x28, 0x1c, 0xb3, 0xa2, 0x3b, 0x39, + 0x3d, 0xf0, 0x4e, 0xe5, 0xef, 0xe4, 0xc9, 0xe3, 0x8e, 0xed, 0xe4, 0xf4, 0xb0, 0x2b, 0x8a, 0xa3, + 0x4e, 0x45, 0x2e, 0x66, 0x09, 0x0b, 0x72, 0xda, 0xa5, 0x42, 0x1f, 0xa7, 0xdb, 0xa5, 0x50, 0x38, + 0x66, 0x65, 0xff, 0x60, 0x01, 0xce, 0x74, 0x5e, 0x6f, 0xb1, 0xa1, 0xa6, 0x12, 0x7b, 0xb3, 0x24, + 0x0c, 0x35, 0x5c, 0x6d, 0x10, 0x53, 0xf5, 0x1c, 0xcc, 0xf4, 0x12, 0x4c, 0xaa, 0x47, 0x64, 0x0d, + 0xb7, 0xb6, 0xb7, 0x1e, 0x6b, 0x6a, 0x54, 0xd8, 0x8f, 0x6a, 0x92, 0x00, 0xa7, 0xcb, 0xa0, 0x39, + 0x18, 0x37, 0x80, 0x2b, 0x8b, 0x42, 0x3d, 0x10, 0x87, 0xc8, 0x37, 0xd1, 0x38, 0x49, 0x6f, 0xff, + 0x9c, 0x05, 0x0f, 0xe5, 0xe4, 0x2b, 0xee, 0x39, 0x56, 0xe7, 0x26, 0x8c, 0xb7, 0xcc, 0xa2, 0x5d, + 0xc2, 0x0b, 0x1b, 0x59, 0x91, 0x55, 0x5b, 0x13, 0x08, 0x9c, 0x64, 0x6a, 0xff, 0x4c, 0x01, 0x4e, + 0x77, 0xf4, 0x8a, 0x46, 0x18, 0x4e, 0x6c, 0x35, 0x43, 0x67, 0x21, 0x20, 0x75, 0xe2, 0x45, 0xae, + 0xd3, 0xa8, 0xb6, 0x48, 0x4d, 0x33, 0xb5, 0x31, 0xf7, 0xe2, 0x4b, 0x6b, 0xd5, 0xb9, 0x34, 0x05, + 0xce, 0x29, 0x89, 0x96, 0x01, 0xa5, 0x31, 0x62, 0x84, 0xd9, 0xd5, 0x34, 0xcd, 0x0f, 0x67, 0x94, + 0x40, 0x1f, 0x84, 0x51, 0xe5, 0x6d, 0xad, 0x8d, 0x38, 0xdb, 0xd8, 0xb1, 0x8e, 0xc0, 0x26, 0x1d, + 0xba, 0xc8, 0x73, 0xa7, 0x88, 0x2c, 0x3b, 0xc2, 0x2e, 0x37, 0x2e, 0x13, 0xa3, 0x08, 0x30, 0xd6, + 0x69, 0xe6, 0x5f, 0xfa, 0xed, 0x6f, 0x9e, 0x79, 0xdf, 0xef, 0x7f, 0xf3, 0xcc, 0xfb, 0xfe, 0xe8, + 0x9b, 0x67, 0xde, 0xf7, 0x3d, 0x77, 0xcf, 0x58, 0xbf, 0x7d, 0xf7, 0x8c, 0xf5, 0xfb, 0x77, 0xcf, + 0x58, 0x7f, 0x74, 0xf7, 0x8c, 0xf5, 0xef, 
0xee, 0x9e, 0xb1, 0xbe, 0xf4, 0xa7, 0x67, 0xde, 0xf7, + 0x26, 0x8a, 0xa3, 0xdf, 0x5e, 0xa0, 0xa3, 0x73, 0x61, 0xf7, 0xe2, 0xff, 0x0a, 0x00, 0x00, 0xff, + 0xff, 0x31, 0xd1, 0xcb, 0x48, 0xf3, 0x1a, 0x01, 0x00, } func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) { @@ -8265,6 +8423,70 @@ func (m *ClientIPConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ClusterTrustBundleProjection) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterTrustBundleProjection) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterTrustBundleProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Optional != nil { + i-- + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x22 + if m.LabelSelector != nil { + { + size, err := m.LabelSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.SignerName != nil { + i -= len(*m.SignerName) + copy(dAtA[i:], *m.SignerName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SignerName))) + i-- + dAtA[i] = 0x12 + } + if m.Name != nil { + i -= len(*m.Name) + copy(dAtA[i:], *m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *ComponentCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -11705,6 +11927,18 @@ func (m *LifecycleHandler) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Sleep != nil { + { + size, err := m.Sleep.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } if m.TCPSocket != nil { { size, err := m.TCPSocket.MarshalToSizedBuffer(dAtA[:i]) @@ -12125,6 +12359,13 @@ func (m *LoadBalancerIngress) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x22 } } + if m.IPMode != nil { + i -= len(*m.IPMode) + copy(dAtA[i:], *m.IPMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.IPMode))) + i-- + dAtA[i] = 0x1a + } i -= len(m.Hostname) copy(dAtA[i:], m.Hostname) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) @@ -12238,6 +12479,39 @@ func (m *LocalVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ModifyVolumeStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ModifyVolumeStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ModifyVolumeStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.TargetVolumeAttributesClassName) + copy(dAtA[i:], m.TargetVolumeAttributesClassName) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.TargetVolumeAttributesClassName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *NFSVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -13757,6 +14031,13 @@ func (m *PersistentVolumeClaimSpec) MarshalToSizedBuffer(dAtA []byte) (int, erro _ = i var l int _ = l + if m.VolumeAttributesClassName != nil { + i -= len(*m.VolumeAttributesClassName) + copy(dAtA[i:], *m.VolumeAttributesClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeAttributesClassName))) + i-- + dAtA[i] = 0x4a + } if m.DataSourceRef != nil { { size, err := m.DataSourceRef.MarshalToSizedBuffer(dAtA[:i]) @@ -13854,6 +14135,25 @@ func (m *PersistentVolumeClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, er _ = i var l int _ = l + if m.ModifyVolumeStatus != nil { + { + size, err := m.ModifyVolumeStatus.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.CurrentVolumeAttributesClassName != nil { + i -= len(*m.CurrentVolumeAttributesClassName) + copy(dAtA[i:], *m.CurrentVolumeAttributesClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.CurrentVolumeAttributesClassName))) + i-- + dAtA[i] = 0x42 + } if len(m.AllocatedResourceStatuses) > 0 { keysForAllocatedResourceStatuses := make([]string, 0, len(m.AllocatedResourceStatuses)) for k := range m.AllocatedResourceStatuses { @@ -14414,6 +14714,13 @@ func (m *PersistentVolumeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.VolumeAttributesClassName != nil { + i -= len(*m.VolumeAttributesClassName) + copy(dAtA[i:], *m.VolumeAttributesClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeAttributesClassName))) + i-- + dAtA[i] = 0x52 + } if m.NodeAffinity != nil { { size, err := m.NodeAffinity.MarshalToSizedBuffer(dAtA[:i]) @@ -14722,6 +15029,24 @@ func (m *PodAffinityTerm) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.MismatchLabelKeys) > 0 { + for iNdEx := len(m.MismatchLabelKeys) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.MismatchLabelKeys[iNdEx]) + copy(dAtA[i:], m.MismatchLabelKeys[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MismatchLabelKeys[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.MatchLabelKeys) > 0 { + for iNdEx := len(m.MatchLabelKeys) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.MatchLabelKeys[iNdEx]) + copy(dAtA[i:], m.MatchLabelKeys[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MatchLabelKeys[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } if m.NamespaceSelector != nil { { size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) @@ -19244,6 +19569,32 @@ func (m *SessionAffinityConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *SleepAction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SleepAction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SleepAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Seconds)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + func (m *StorageOSPersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := 
m.Size() dAtA = make([]byte, size) @@ -19950,6 +20301,18 @@ func (m *VolumeProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.ClusterTrustBundle != nil { + { + size, err := m.ClusterTrustBundle.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } if m.ServiceAccountToken != nil { { size, err := m.ServiceAccountToken.MarshalToSizedBuffer(dAtA[:i]) @@ -20001,6 +20364,87 @@ func (m *VolumeProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *VolumeResourceRequirements) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeResourceRequirements) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeResourceRequirements) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Requests) > 0 { + keysForRequests := make([]string, 0, len(m.Requests)) + for k := range m.Requests { + keysForRequests = append(keysForRequests, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForRequests) + for iNdEx := len(keysForRequests) - 1; iNdEx >= 0; iNdEx-- { + v := m.Requests[ResourceName(keysForRequests[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForRequests[iNdEx]) + copy(dAtA[i:], keysForRequests[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForRequests[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Limits) > 0 { + keysForLimits := make([]string, 0, len(m.Limits)) + for k := range m.Limits { + keysForLimits = append(keysForLimits, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLimits) + for iNdEx := len(keysForLimits) - 1; iNdEx >= 0; iNdEx-- { + v := m.Limits[ResourceName(keysForLimits[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForLimits[iNdEx]) + copy(dAtA[i:], keysForLimits[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForLimits[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *VolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -20893,6 +21337,32 @@ func (m *ClientIPConfig) Size() (n int) { return n } +func (m *ClusterTrustBundleProjection) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Name != nil { + l = len(*m.Name) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SignerName != nil { + l = len(*m.SignerName) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.LabelSelector != nil { + l = m.LabelSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + n += 2 + } + return n +} + func (m *ComponentCondition) Size() (n int) { if m == nil { return 0 @@ -22168,6 +22638,10 @@ func (m 
*LifecycleHandler) Size() (n int) { l = m.TCPSocket.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.Sleep != nil { + l = m.Sleep.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -22299,6 +22773,10 @@ func (m *LoadBalancerIngress) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Hostname) n += 1 + l + sovGenerated(uint64(l)) + if m.IPMode != nil { + l = len(*m.IPMode) + n += 1 + l + sovGenerated(uint64(l)) + } if len(m.Ports) > 0 { for _, e := range m.Ports { l = e.Size() @@ -22349,6 +22827,19 @@ func (m *LocalVolumeSource) Size() (n int) { return n } +func (m *ModifyVolumeStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TargetVolumeAttributesClassName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *NFSVolumeSource) Size() (n int) { if m == nil { return 0 @@ -22930,6 +23421,10 @@ func (m *PersistentVolumeClaimSpec) Size() (n int) { l = m.DataSourceRef.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.VolumeAttributesClassName != nil { + l = len(*m.VolumeAttributesClassName) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -22979,6 +23474,14 @@ func (m *PersistentVolumeClaimStatus) Size() (n int) { n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } + if m.CurrentVolumeAttributesClassName != nil { + l = len(*m.CurrentVolumeAttributesClassName) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ModifyVolumeStatus != nil { + l = m.ModifyVolumeStatus.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -23166,6 +23669,10 @@ func (m *PersistentVolumeSpec) Size() (n int) { l = m.NodeAffinity.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.VolumeAttributesClassName != nil { + l = len(*m.VolumeAttributesClassName) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -23259,6 +23766,18 @@ func (m *PodAffinityTerm) Size() (n int) { l = m.NamespaceSelector.Size() n += 1 + l + sovGenerated(uint64(l)) } + if len(m.MatchLabelKeys) > 0 { + for _, s := range m.MatchLabelKeys { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.MismatchLabelKeys) > 0 { + for _, s := range m.MismatchLabelKeys { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -24900,6 +25419,16 @@ func (m *SessionAffinityConfig) Size() (n int) { return n } +func (m *SleepAction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Seconds)) + return n +} + func (m *StorageOSPersistentVolumeSource) Size() (n int) { if m == nil { return 0 @@ -25193,6 +25722,37 @@ func (m *VolumeProjection) Size() (n int) { l = m.ServiceAccountToken.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.ClusterTrustBundle != nil { + l = m.ClusterTrustBundle.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VolumeResourceRequirements) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Limits) > 0 { + for k, v := range m.Limits { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Requests) > 0 { + for k, v := range m.Requests { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } return n } @@ -25623,6 +26183,20 @@ func (this *ClientIPConfig) 
String() string { }, "") return s } +func (this *ClusterTrustBundleProjection) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterTrustBundleProjection{`, + `Name:` + valueToStringGenerated(this.Name) + `,`, + `SignerName:` + valueToStringGenerated(this.SignerName) + `,`, + `LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} func (this *ComponentCondition) String() string { if this == nil { return "nil" @@ -26577,6 +27151,7 @@ func (this *LifecycleHandler) String() string { `Exec:` + strings.Replace(this.Exec.String(), "ExecAction", "ExecAction", 1) + `,`, `HTTPGet:` + strings.Replace(this.HTTPGet.String(), "HTTPGetAction", "HTTPGetAction", 1) + `,`, `TCPSocket:` + strings.Replace(this.TCPSocket.String(), "TCPSocketAction", "TCPSocketAction", 1) + `,`, + `Sleep:` + strings.Replace(this.Sleep.String(), "SleepAction", "SleepAction", 1) + `,`, `}`, }, "") return s @@ -26716,6 +27291,7 @@ func (this *LoadBalancerIngress) String() string { s := strings.Join([]string{`&LoadBalancerIngress{`, `IP:` + fmt.Sprintf("%v", this.IP) + `,`, `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `IPMode:` + valueToStringGenerated(this.IPMode) + `,`, `Ports:` + repeatedStringForPorts + `,`, `}`, }, "") @@ -26757,6 +27333,17 @@ func (this *LocalVolumeSource) String() string { }, "") return s } +func (this *ModifyVolumeStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ModifyVolumeStatus{`, + `TargetVolumeAttributesClassName:` + fmt.Sprintf("%v", this.TargetVolumeAttributesClassName) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `}`, + }, "") + return s +} func (this *NFSVolumeSource) String() string { if this == nil { return "nil" @@ -27206,13 +27793,14 @@ func (this *PersistentVolumeClaimSpec) String() string { } s := strings.Join([]string{`&PersistentVolumeClaimSpec{`, `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, - `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1), `&`, ``, 1) + `,`, + `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "VolumeResourceRequirements", "VolumeResourceRequirements", 1), `&`, ``, 1) + `,`, `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, `StorageClassName:` + valueToStringGenerated(this.StorageClassName) + `,`, `VolumeMode:` + valueToStringGenerated(this.VolumeMode) + `,`, `DataSource:` + strings.Replace(this.DataSource.String(), "TypedLocalObjectReference", "TypedLocalObjectReference", 1) + `,`, `DataSourceRef:` + strings.Replace(this.DataSourceRef.String(), "TypedObjectReference", "TypedObjectReference", 1) + `,`, + `VolumeAttributesClassName:` + valueToStringGenerated(this.VolumeAttributesClassName) + `,`, `}`, }, "") return s @@ -27263,6 +27851,8 @@ func (this *PersistentVolumeClaimStatus) String() string { `Conditions:` + repeatedStringForConditions + `,`, `AllocatedResources:` + mapStringForAllocatedResources + `,`, `AllocatedResourceStatuses:` + mapStringForAllocatedResourceStatuses + `,`, + `CurrentVolumeAttributesClassName:` + valueToStringGenerated(this.CurrentVolumeAttributesClassName) + `,`, + `ModifyVolumeStatus:` + 
strings.Replace(this.ModifyVolumeStatus.String(), "ModifyVolumeStatus", "ModifyVolumeStatus", 1) + `,`, `}`, }, "") return s @@ -27360,6 +27950,7 @@ func (this *PersistentVolumeSpec) String() string { `MountOptions:` + fmt.Sprintf("%v", this.MountOptions) + `,`, `VolumeMode:` + valueToStringGenerated(this.VolumeMode) + `,`, `NodeAffinity:` + strings.Replace(this.NodeAffinity.String(), "VolumeNodeAffinity", "VolumeNodeAffinity", 1) + `,`, + `VolumeAttributesClassName:` + valueToStringGenerated(this.VolumeAttributesClassName) + `,`, `}`, }, "") return s @@ -27430,6 +28021,8 @@ func (this *PodAffinityTerm) String() string { `Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`, `TopologyKey:` + fmt.Sprintf("%v", this.TopologyKey) + `,`, `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `MatchLabelKeys:` + fmt.Sprintf("%v", this.MatchLabelKeys) + `,`, + `MismatchLabelKeys:` + fmt.Sprintf("%v", this.MismatchLabelKeys) + `,`, `}`, }, "") return s @@ -28709,6 +29302,16 @@ func (this *SessionAffinityConfig) String() string { }, "") return s } +func (this *SleepAction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SleepAction{`, + `Seconds:` + fmt.Sprintf("%v", this.Seconds) + `,`, + `}`, + }, "") + return s +} func (this *StorageOSPersistentVolumeSource) String() string { if this == nil { return "nil" @@ -28910,6 +29513,38 @@ func (this *VolumeProjection) String() string { `DownwardAPI:` + strings.Replace(this.DownwardAPI.String(), "DownwardAPIProjection", "DownwardAPIProjection", 1) + `,`, `ConfigMap:` + strings.Replace(this.ConfigMap.String(), "ConfigMapProjection", "ConfigMapProjection", 1) + `,`, `ServiceAccountToken:` + strings.Replace(this.ServiceAccountToken.String(), "ServiceAccountTokenProjection", "ServiceAccountTokenProjection", 1) + `,`, + `ClusterTrustBundle:` + strings.Replace(this.ClusterTrustBundle.String(), "ClusterTrustBundleProjection", "ClusterTrustBundleProjection", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeResourceRequirements) String() string { + if this == nil { + return "nil" + } + keysForLimits := make([]string, 0, len(this.Limits)) + for k := range this.Limits { + keysForLimits = append(keysForLimits, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLimits) + mapStringForLimits := "ResourceList{" + for _, k := range keysForLimits { + mapStringForLimits += fmt.Sprintf("%v: %v,", k, this.Limits[ResourceName(k)]) + } + mapStringForLimits += "}" + keysForRequests := make([]string, 0, len(this.Requests)) + for k := range this.Requests { + keysForRequests = append(keysForRequests, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForRequests) + mapStringForRequests := "ResourceList{" + for _, k := range keysForRequests { + mapStringForRequests += fmt.Sprintf("%v: %v,", k, this.Requests[ResourceName(k)]) + } + mapStringForRequests += "}" + s := strings.Join([]string{`&VolumeResourceRequirements{`, + `Limits:` + mapStringForLimits + `,`, + `Requests:` + mapStringForRequests + `,`, `}`, }, "") return s @@ -32037,7 +32672,7 @@ func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { } return nil } -func (m *ComponentCondition) Unmarshal(dAtA []byte) error { +func (m *ClusterTrustBundleProjection) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -32060,15 +32695,15 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if 
wireType == 4 { - return fmt.Errorf("proto: ComponentCondition: wiretype end group for non-group") + return fmt.Errorf("proto: ClusterTrustBundleProjection: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ComponentCondition: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ClusterTrustBundleProjection: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -32096,11 +32731,12 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = ComponentConditionType(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.Name = &s iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SignerName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -32128,13 +32764,14 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.SignerName = &s iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -32144,27 +32781,31 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(dAtA[iNdEx:postIndex]) + if m.LabelSelector == nil { + m.LabelSelector = &v1.LabelSelector{} + } + if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -32192,8 +32833,29 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Error = string(dAtA[iNdEx:postIndex]) + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -32215,7 +32877,7 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { } return nil } -func (m *ComponentStatus) Unmarshal(dAtA 
[]byte) error { +func (m *ComponentCondition) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -32238,17 +32900,17 @@ func (m *ComponentStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ComponentStatus: wiretype end group for non-group") + return fmt.Errorf("proto: ComponentCondition: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ComponentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ComponentCondition: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -32258,28 +32920,206 @@ func (m *ComponentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Type = ComponentConditionType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ComponentStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ComponentStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ComponentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -43048,6 +43888,42 @@ func (m *LifecycleHandler) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sleep", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Sleep == nil { + m.Sleep = &SleepAction{} + } + if err := m.Sleep.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -44323,6 +45199,39 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { } m.Hostname = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IPMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := LoadBalancerIPMode(dAtA[iNdEx:postIndex]) + m.IPMode = &s + iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) @@ -44659,7 +45568,7 @@ func (m *LocalVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { +func (m *ModifyVolumeStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -44682,15 +45591,15 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NFSVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ModifyVolumeStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ModifyVolumeStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Server", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TargetVolumeAttributesClassName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -44718,11 +45627,11 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Server = string(dAtA[iNdEx:postIndex]) + m.TargetVolumeAttributesClassName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -44750,28 +45659,8 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + m.Status = PersistentVolumeClaimModifyVolumeStatus(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -44793,7 +45682,7 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *Namespace) Unmarshal(dAtA []byte) error { +func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -44816,17 +45705,17 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Namespace: wiretype end group for non-group") + return fmt.Errorf("proto: NFSVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Namespace: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - 
return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Server", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -44836,144 +45725,278 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Server = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NamespaceCondition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NamespaceCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NamespaceCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Namespace) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Namespace: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Namespace: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamespaceCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamespaceCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamespaceCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -49775,6 +50798,39 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeAttributesClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.VolumeAttributesClassName = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -50308,6 +51364,75 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } m.AllocatedResourceStatuses[ResourceName(mapkey)] = ((ClaimResourceStatus)(mapvalue)) iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentVolumeAttributesClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.CurrentVolumeAttributesClassName = &s + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field ModifyVolumeStatus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ModifyVolumeStatus == nil { + m.ModifyVolumeStatus = &ModifyVolumeStatus{} + } + if err := m.ModifyVolumeStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -51930,6 +53055,39 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeAttributesClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.VolumeAttributesClassName = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -52679,6 +53837,70 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchLabelKeys", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchLabelKeys = append(m.MatchLabelKeys, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MismatchLabelKeys", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MismatchLabelKeys = append(m.MismatchLabelKeys, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -66874,6 +68096,75 @@ func (m *SessionAffinityConfig) Unmarshal(dAtA []byte) error { } return nil } +func (m *SleepAction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 
0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SleepAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SleepAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) + } + m.Seconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Seconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -69418,6 +70709,350 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterTrustBundle", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClusterTrustBundle == nil { + m.ClusterTrustBundle = &ClusterTrustBundleProjection{} + } + if err := m.ClusterTrustBundle.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeResourceRequirements) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeResourceRequirements: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeResourceRequirements: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Limits", wireType) + } + var msglen int + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Limits == nil { + m.Limits = make(ResourceList) + } + var mapkey ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Limits[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Requests == nil { + m.Requests = make(ResourceList) + } + var mapkey ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + 
if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Requests[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/core/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto index 901e837313..cf9b6e6ebc 100644 --- a/vendor/k8s.io/api/core/v1/generated.proto +++ b/vendor/k8s.io/api/core/v1/generated.proto @@ -228,10 +228,8 @@ message CSIPersistentVolumeSource { // nodeExpandSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // NodeExpandVolume call. - // This is a beta field which is enabled default by CSINodeExpandSecret feature gate. // This field is optional, may be omitted if no secret is required. If the // secret object contains more than one secret, all secrets are passed. - // +featureGate=CSINodeExpandSecret // +optional optional SecretReference nodeExpandSecretRef = 10; } @@ -433,6 +431,40 @@ message ClientIPConfig { optional int32 timeoutSeconds = 1; } +// ClusterTrustBundleProjection describes how to select a set of +// ClusterTrustBundle objects and project their contents into the pod +// filesystem. +message ClusterTrustBundleProjection { + // Select a single ClusterTrustBundle by object name. Mutually-exclusive + // with signerName and labelSelector. + // +optional + optional string name = 1; + + // Select all ClusterTrustBundles that match this signer name. + // Mutually-exclusive with name. The contents of all selected + // ClusterTrustBundles will be unified and deduplicated. + // +optional + optional string signerName = 2; + + // Select all ClusterTrustBundles that match this label selector. Only has + // effect if signerName is set. Mutually-exclusive with name. If unset, + // interpreted as "match nothing". If set but empty, interpreted as "match + // everything". 
+ // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 3; + + // If true, don't block pod startup if the referenced ClusterTrustBundle(s) + // aren't available. If using name, then the named ClusterTrustBundle is + // allowed not to exist. If using signerName, then the combination of + // signerName and labelSelector is allowed to match zero + // ClusterTrustBundles. + // +optional + optional bool optional = 5; + + // Relative path from the volume root to write the bundle. + optional string path = 4; +} + // Information about the condition of a component. message ComponentCondition { // Type of condition for a component. @@ -1159,7 +1191,7 @@ message EndpointPort { // RFC-6335 and https://www.iana.org/assignments/service-names). // // * Kubernetes-defined prefixed names: - // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + // * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 // @@ -2088,6 +2120,11 @@ message LifecycleHandler { // lifecycle hooks will fail in runtime when tcp handler is specified. // +optional optional TCPSocketAction tcpSocket = 3; + + // Sleep represents the duration that the container should sleep before being terminated. + // +featureGate=PodLifecycleSleepAction + // +optional + optional SleepAction sleep = 4; } // LimitRange sets resource usage limits for each kind of resource in a Namespace. @@ -2171,6 +2208,15 @@ message LoadBalancerIngress { // +optional optional string hostname = 2; + // IPMode specifies how the load-balancer IP behaves, and may only be specified when the ip field is specified. + // Setting this to "VIP" indicates that traffic is delivered to the node with + // the destination set to the load-balancer's IP and port. + // Setting this to "Proxy" indicates that traffic is delivered to the node or pod with + // the destination set to the node's IP and node port or the pod's IP and port. + // Service implementations may use this information to adjust traffic routing. + // +optional + optional string ipMode = 3; + // Ports is a list of records of service ports // If used, every port defined in the service should have an entry in it // +listType=atomic @@ -2211,6 +2257,24 @@ message LocalVolumeSource { optional string fsType = 2; } +// ModifyVolumeStatus represents the status object of ControllerModifyVolume operation +message ModifyVolumeStatus { + // targetVolumeAttributesClassName is the name of the VolumeAttributesClass the PVC currently being reconciled + optional string targetVolumeAttributesClassName = 1; + + // status is the status of the ControllerModifyVolume operation. It can be in any of following states: + // - Pending + // Pending indicates that the PersistentVolumeClaim cannot be modified due to unmet requirements, such as + // the specified VolumeAttributesClass not existing. + // - InProgress + // InProgress indicates that the volume is being modified. + // - Infeasible + // Infeasible indicates that the request has been rejected as invalid by the CSI driver. To + // resolve the error, a valid VolumeAttributesClass needs to be specified. + // Note: New statuses can be added in the future. 
Consumers should check for unknown statuses and fail appropriately. + optional string status = 2; +} + // Represents an NFS mount that lasts the lifetime of a pod. // NFS volumes do not support ownership management or SELinux relabeling. message NFSVolumeSource { @@ -2816,7 +2880,7 @@ message PersistentVolumeClaimSpec { // status field of the claim. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources // +optional - optional ResourceRequirements resources = 2; + optional VolumeResourceRequirements resources = 2; // volumeName is the binding reference to the PersistentVolume backing this claim. // +optional @@ -2868,6 +2932,22 @@ message PersistentVolumeClaimSpec { // (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. // +optional optional TypedObjectReference dataSourceRef = 8; + + // volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + // If specified, the CSI driver will create or update the volume with the attributes defined + // in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + // it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + // will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + // If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + // will be set by the persistentvolume controller if it exists. + // If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + // set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + // exists. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass + // (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. + // +featureGate=VolumeAttributesClass + // +optional + optional string volumeAttributesClassName = 9; } // PersistentVolumeClaimStatus is the current status of a persistent volume claim. @@ -2957,6 +3037,20 @@ message PersistentVolumeClaimStatus { // +mapType=granular // +optional map<string, string> allocatedResourceStatuses = 7; + + // currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. + // When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim + // This is an alpha field and requires enabling VolumeAttributesClass feature. + // +featureGate=VolumeAttributesClass + // +optional + optional string currentVolumeAttributesClassName = 8; + + // ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. + // When this is unset, there is no ModifyVolume operation being attempted. + // This is an alpha field and requires enabling VolumeAttributesClass feature. + // +featureGate=VolumeAttributesClass + // +optional + optional ModifyVolumeStatus modifyVolumeStatus = 9; } // PersistentVolumeClaimTemplate is used to produce @@ -3161,6 +3255,17 @@ message PersistentVolumeSpec { // This field influences the scheduling of pods that use this volume. // +optional optional VolumeNodeAffinity nodeAffinity = 9; + + // Name of VolumeAttributesClass to which this persistent volume belongs. Empty value + // is not allowed. When this field is not set, it indicates that this volume does not belong to any + // VolumeAttributesClass. 
This field is mutable and can be changed by the CSI driver + // after a volume has been updated successfully to a new class. + // For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound + // PersistentVolumeClaims during the binding process. + // This is an alpha field and requires enabling VolumeAttributesClass feature. + // +featureGate=VolumeAttributesClass + // +optional + optional string volumeAttributesClassName = 10; } // PersistentVolumeStatus is the current status of a persistent volume. @@ -3253,6 +3358,7 @@ message PodAffinity { // a pod of the set of pods is running message PodAffinityTerm { // A label query over a set of resources, in this case pods. + // If it's null, this PodAffinityTerm matches with no Pods. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 1; @@ -3277,6 +3383,32 @@ message PodAffinityTerm { // An empty selector ({}) matches all namespaces. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 4; + + // MatchLabelKeys is a set of pod label keys to select which pods will + // be taken into consideration. The keys are used to lookup values from the + // incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + // to select the group of existing pods which pods will be taken into consideration + // for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + // pod labels will be ignored. The default value is empty. + // The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + // Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + // This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + // +listType=atomic + // +optional + repeated string matchLabelKeys = 5; + + // MismatchLabelKeys is a set of pod label keys to select which pods will + // be taken into consideration. The keys are used to lookup values from the + // incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + // to select the group of existing pods which pods will be taken into consideration + // for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + // pod labels will be ignored. The default value is empty. + // The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + // Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + // This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + // +listType=atomic + // +optional + repeated string mismatchLabelKeys = 6; } // Pod anti affinity is a group of inter pod anti affinity scheduling rules. @@ -5250,7 +5382,7 @@ message ServicePort { // RFC-6335 and https://www.iana.org/assignments/service-names). // // * Kubernetes-defined prefixed names: - // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + // * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 // @@ -5553,6 +5685,12 @@ message SessionAffinityConfig { optional ClientIPConfig clientIP = 1; } +// SleepAction describes a "sleep" action. 
+message SleepAction { + // Seconds is the number of seconds to sleep. + optional int64 seconds = 1; +} + // Represents a StorageOS persistent volume resource. message StorageOSPersistentVolumeSource { // volumeName is the human-readable name of the StorageOS volume. Volume @@ -5960,6 +6098,39 @@ message VolumeProjection { // serviceAccountToken is information about the serviceAccountToken data to project // +optional optional ServiceAccountTokenProjection serviceAccountToken = 4; + + // ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + // of ClusterTrustBundle objects in an auto-updating file. + // + // Alpha, gated by the ClusterTrustBundleProjection feature gate. + // + // ClusterTrustBundle objects can either be selected by name, or by the + // combination of signer name and a label selector. + // + // Kubelet performs aggressive normalization of the PEM contents written + // into the pod filesystem. Esoteric PEM features such as inter-block + // comments and block headers are stripped. Certificates are deduplicated. + // The ordering of certificates within the file is arbitrary, and Kubelet + // may change the order over time. + // + // +featureGate=ClusterTrustBundleProjection + // +optional + optional ClusterTrustBundleProjection clusterTrustBundle = 5; +} + +// VolumeResourceRequirements describes the storage resource requirements for a volume. +message VolumeResourceRequirements { + // Limits describes the maximum amount of compute resources allowed. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // +optional + map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> limits = 1; + + // Requests describes the minimum amount of compute resources required. + // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + // otherwise to an implementation-defined value. Requests cannot exceed Limits. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // +optional + map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> requests = 2; } // Represents the source of a volume to mount. diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go index 9e05c22356..1aade3806f 100644 --- a/vendor/k8s.io/api/core/v1/types.go +++ b/vendor/k8s.io/api/core/v1/types.go @@ -363,6 +363,16 @@ type PersistentVolumeSpec struct { // This field influences the scheduling of pods that use this volume. // +optional NodeAffinity *VolumeNodeAffinity `json:"nodeAffinity,omitempty" protobuf:"bytes,9,opt,name=nodeAffinity"` + // Name of VolumeAttributesClass to which this persistent volume belongs. Empty value + // is not allowed. When this field is not set, it indicates that this volume does not belong to any + // VolumeAttributesClass. This field is mutable and can be changed by the CSI driver + // after a volume has been updated successfully to a new class. + // For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound + // PersistentVolumeClaims during the binding process. + // This is an alpha field and requires enabling VolumeAttributesClass feature. + // +featureGate=VolumeAttributesClass + // +optional + VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty" protobuf:"bytes,10,opt,name=volumeAttributesClassName"` } // VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from. 
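For orientation, a minimal sketch of how client code might exercise two of the additions above — the SleepAction lifecycle hook (PodLifecycleSleepAction gate) and the ClusterTrustBundle volume projection (ClusterTrustBundleProjection gate) — assuming k8s.io/api/core/v1 and k8s.io/apimachinery at the versions pinned in this bump; the pod, image, and signer names are placeholders, not anything defined in this change:

package main

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// examplePod sketches the new fields: a preStop sleep hook (SleepAction)
// and a clusterTrustBundle projected volume source.
func examplePod() *corev1.Pod {
	signer := "example.com/my-signer" // placeholder signer name
	optional := true
	return &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "demo"},
		Spec: corev1.PodSpec{
			Containers: []corev1.Container{{
				Name:  "app",
				Image: "registry.example.com/app:latest", // placeholder image
				Lifecycle: &corev1.Lifecycle{
					PreStop: &corev1.LifecycleHandler{
						// New in this bump: sleep 5 seconds before termination.
						Sleep: &corev1.SleepAction{Seconds: 5},
					},
				},
			}},
			Volumes: []corev1.Volume{{
				Name: "trust-bundles",
				VolumeSource: corev1.VolumeSource{
					Projected: &corev1.ProjectedVolumeSource{
						Sources: []corev1.VolumeProjection{{
							// New in this bump: project ClusterTrustBundles
							// selected by signer name and label selector.
							ClusterTrustBundle: &corev1.ClusterTrustBundleProjection{
								SignerName:    &signer,
								LabelSelector: &metav1.LabelSelector{}, // empty selector: match everything
								Optional:      &optional,
								Path:          "bundle.pem",
							},
						}},
					},
				},
			}},
		},
	}
}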
@@ -486,7 +496,7 @@ type PersistentVolumeClaimSpec struct { // status field of the claim. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources // +optional - Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,2,opt,name=resources"` + Resources VolumeResourceRequirements `json:"resources,omitempty" protobuf:"bytes,2,opt,name=resources"` // volumeName is the binding reference to the PersistentVolume backing this claim. // +optional VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,3,opt,name=volumeName"` @@ -533,6 +543,21 @@ type PersistentVolumeClaimSpec struct { // (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. // +optional DataSourceRef *TypedObjectReference `json:"dataSourceRef,omitempty" protobuf:"bytes,8,opt,name=dataSourceRef"` + // volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + // If specified, the CSI driver will create or update the volume with the attributes defined + // in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + // it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + // will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + // If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + // will be set by the persistentvolume controller if it exists. + // If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + // set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + // exists. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass + // (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. + // +featureGate=VolumeAttributesClass + // +optional + VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty" protobuf:"bytes,9,opt,name=volumeAttributesClassName"` } type TypedObjectReference struct { @@ -561,6 +586,11 @@ const ( PersistentVolumeClaimResizing PersistentVolumeClaimConditionType = "Resizing" // PersistentVolumeClaimFileSystemResizePending - controller resize is finished and a file system resize is pending on node PersistentVolumeClaimFileSystemResizePending PersistentVolumeClaimConditionType = "FileSystemResizePending" + + // Applying the target VolumeAttributesClass encountered an error + PersistentVolumeClaimVolumeModifyVolumeError PersistentVolumeClaimConditionType = "ModifyVolumeError" + // Volume is being modified + PersistentVolumeClaimVolumeModifyingVolume PersistentVolumeClaimConditionType = "ModifyingVolume" ) // +enum @@ -587,6 +617,38 @@ const ( PersistentVolumeClaimNodeResizeFailed ClaimResourceStatus = "NodeResizeFailed" ) +// +enum +// New statuses can be added in the future. 
Consumers should check for unknown statuses and fail appropriately +type PersistentVolumeClaimModifyVolumeStatus string + +const ( + // Pending indicates that the PersistentVolumeClaim cannot be modified due to unmet requirements, such as + // the specified VolumeAttributesClass not existing + PersistentVolumeClaimModifyVolumePending PersistentVolumeClaimModifyVolumeStatus = "Pending" + // InProgress indicates that the volume is being modified + PersistentVolumeClaimModifyVolumeInProgress PersistentVolumeClaimModifyVolumeStatus = "InProgress" + // Infeasible indicates that the request has been rejected as invalid by the CSI driver. To + // resolve the error, a valid VolumeAttributesClass needs to be specified + PersistentVolumeClaimModifyVolumeInfeasible PersistentVolumeClaimModifyVolumeStatus = "Infeasible" +) + +// ModifyVolumeStatus represents the status object of ControllerModifyVolume operation +type ModifyVolumeStatus struct { + // targetVolumeAttributesClassName is the name of the VolumeAttributesClass the PVC currently being reconciled + TargetVolumeAttributesClassName string `json:"targetVolumeAttributesClassName,omitempty" protobuf:"bytes,1,opt,name=targetVolumeAttributesClassName"` + // status is the status of the ControllerModifyVolume operation. It can be in any of following states: + // - Pending + // Pending indicates that the PersistentVolumeClaim cannot be modified due to unmet requirements, such as + // the specified VolumeAttributesClass not existing. + // - InProgress + // InProgress indicates that the volume is being modified. + // - Infeasible + // Infeasible indicates that the request has been rejected as invalid by the CSI driver. To + // resolve the error, a valid VolumeAttributesClass needs to be specified. + // Note: New statuses can be added in the future. Consumers should check for unknown statuses and fail appropriately. + Status PersistentVolumeClaimModifyVolumeStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=PersistentVolumeClaimModifyVolumeStatus"` +} + // PersistentVolumeClaimCondition contains details about state of pvc type PersistentVolumeClaimCondition struct { Type PersistentVolumeClaimConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PersistentVolumeClaimConditionType"` @@ -693,6 +755,18 @@ type PersistentVolumeClaimStatus struct { // +mapType=granular // +optional AllocatedResourceStatuses map[ResourceName]ClaimResourceStatus `json:"allocatedResourceStatuses,omitempty" protobuf:"bytes,7,rep,name=allocatedResourceStatuses"` + // currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. + // When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim + // This is an alpha field and requires enabling VolumeAttributesClass feature. + // +featureGate=VolumeAttributesClass + // +optional + CurrentVolumeAttributesClassName *string `json:"currentVolumeAttributesClassName,omitempty" protobuf:"bytes,8,opt,name=currentVolumeAttributesClassName"` + // ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. + // When this is unset, there is no ModifyVolume operation being attempted. + // This is an alpha field and requires enabling VolumeAttributesClass feature. 
+ // +featureGate=VolumeAttributesClass + // +optional + ModifyVolumeStatus *ModifyVolumeStatus `json:"modifyVolumeStatus,omitempty" protobuf:"bytes,9,opt,name=modifyVolumeStatus"` } // +enum @@ -1763,6 +1837,40 @@ type ServiceAccountTokenProjection struct { Path string `json:"path" protobuf:"bytes,3,opt,name=path"` } +// ClusterTrustBundleProjection describes how to select a set of +// ClusterTrustBundle objects and project their contents into the pod +// filesystem. +type ClusterTrustBundleProjection struct { + // Select a single ClusterTrustBundle by object name. Mutually-exclusive + // with signerName and labelSelector. + // +optional + Name *string `json:"name,omitempty" protobuf:"bytes,1,rep,name=name"` + + // Select all ClusterTrustBundles that match this signer name. + // Mutually-exclusive with name. The contents of all selected + // ClusterTrustBundles will be unified and deduplicated. + // +optional + SignerName *string `json:"signerName,omitempty" protobuf:"bytes,2,rep,name=signerName"` + + // Select all ClusterTrustBundles that match this label selector. Only has + // effect if signerName is set. Mutually-exclusive with name. If unset, + // interpreted as "match nothing". If set but empty, interpreted as "match + // everything". + // +optional + LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,3,rep,name=labelSelector"` + + // If true, don't block pod startup if the referenced ClusterTrustBundle(s) + // aren't available. If using name, then the named ClusterTrustBundle is + // allowed not to exist. If using signerName, then the combination of + // signerName and labelSelector is allowed to match zero + // ClusterTrustBundles. + // +optional + Optional *bool `json:"optional,omitempty" protobuf:"varint,5,opt,name=optional"` + + // Relative path from the volume root to write the bundle. + Path string `json:"path" protobuf:"bytes,4,rep,name=path"` +} + // Represents a projected volume source type ProjectedVolumeSource struct { // sources is the list of volume projections @@ -1794,6 +1902,24 @@ type VolumeProjection struct { // serviceAccountToken is information about the serviceAccountToken data to project // +optional ServiceAccountToken *ServiceAccountTokenProjection `json:"serviceAccountToken,omitempty" protobuf:"bytes,4,opt,name=serviceAccountToken"` + + // ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + // of ClusterTrustBundle objects in an auto-updating file. + // + // Alpha, gated by the ClusterTrustBundleProjection feature gate. + // + // ClusterTrustBundle objects can either be selected by name, or by the + // combination of signer name and a label selector. + // + // Kubelet performs aggressive normalization of the PEM contents written + // into the pod filesystem. Esoteric PEM features such as inter-block + // comments and block headers are stripped. Certificates are deduplicated. + // The ordering of certificates within the file is arbitrary, and Kubelet + // may change the order over time. + // + // +featureGate=ClusterTrustBundleProjection + // +optional + ClusterTrustBundle *ClusterTrustBundleProjection `json:"clusterTrustBundle,omitempty" protobuf:"bytes,5,opt,name=clusterTrustBundle"` } const ( @@ -1894,10 +2020,8 @@ type CSIPersistentVolumeSource struct { // nodeExpandSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // NodeExpandVolume call. 
- // This is a beta field which is enabled default by CSINodeExpandSecret feature gate. // This field is optional, may be omitted if no secret is required. If the // secret object contains more than one secret, all secrets are passed. - // +featureGate=CSINodeExpandSecret // +optional NodeExpandSecretRef *SecretReference `json:"nodeExpandSecretRef,omitempty" protobuf:"bytes,10,opt,name=nodeExpandSecretRef"` } @@ -2272,6 +2396,12 @@ type ExecAction struct { Command []string `json:"command,omitempty" protobuf:"bytes,1,rep,name=command"` } +// SleepAction describes a "sleep" action. +type SleepAction struct { + // Seconds is the number of seconds to sleep. + Seconds int64 `json:"seconds" protobuf:"bytes,1,opt,name=seconds"` +} + // Probe describes a health check to be performed against a container to determine whether it is // alive or ready to receive traffic. type Probe struct { @@ -2417,6 +2547,27 @@ type ResourceRequirements struct { Claims []ResourceClaim `json:"claims,omitempty" protobuf:"bytes,3,opt,name=claims"` } +// VolumeResourceRequirements describes the storage resource requirements for a volume. +type VolumeResourceRequirements struct { + // Limits describes the maximum amount of compute resources allowed. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // +optional + Limits ResourceList `json:"limits,omitempty" protobuf:"bytes,1,rep,name=limits,casttype=ResourceList,castkey=ResourceName"` + // Requests describes the minimum amount of compute resources required. + // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + // otherwise to an implementation-defined value. Requests cannot exceed Limits. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // +optional + Requests ResourceList `json:"requests,omitempty" protobuf:"bytes,2,rep,name=requests,casttype=ResourceList,castkey=ResourceName"` + + // Claims got added by accident when volumes shared the ResourceRequirements struct + // with containers. Stripping the field got added in 1.27 and was backported to 1.26. + // Starting with Kubernetes 1.28, this field is not part of the volume API anymore. + // + // Future extensions must not use "claims" or field number 3. + // Claims []ResourceClaim `json:"claims,omitempty" protobuf:"bytes,3,opt,name=claims"` +} + // ResourceClaim references one entry in PodSpec.ResourceClaims. type ResourceClaim struct { // Name must match the name of one entry in pod.spec.resourceClaims of @@ -2646,6 +2797,10 @@ type LifecycleHandler struct { // lifecycle hooks will fail in runtime when tcp handler is specified. // +optional TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"` + // Sleep represents the duration that the container should sleep before being terminated. + // +featureGate=PodLifecycleSleepAction + // +optional + Sleep *SleepAction `json:"sleep,omitempty" protobuf:"bytes,4,opt,name=sleep"` } // Lifecycle describes actions that the management system should take in response to container lifecycle @@ -2845,6 +3000,9 @@ const ( // DisruptionTarget indicates the pod is about to be terminated due to a // disruption (such as preemption, eviction API or garbage-collection). DisruptionTarget PodConditionType = "DisruptionTarget" + // PodReadyToStartContainers pod sandbox is successfully configured and + // the pod is ready to launch containers. 
+ PodReadyToStartContainers PodConditionType = "PodReadyToStartContainers" ) // These are reasons for a pod's transition to a condition. @@ -3136,6 +3294,7 @@ type WeightedPodAffinityTerm struct { // a pod of the set of pods is running type PodAffinityTerm struct { // A label query over a set of resources, in this case pods. + // If it's null, this PodAffinityTerm matches with no Pods. // +optional LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"` // namespaces specifies a static list of namespace names that the term applies to. @@ -3157,6 +3316,30 @@ type PodAffinityTerm struct { // An empty selector ({}) matches all namespaces. // +optional NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,4,opt,name=namespaceSelector"` + // MatchLabelKeys is a set of pod label keys to select which pods will + // be taken into consideration. The keys are used to lookup values from the + // incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + // to select the group of existing pods which pods will be taken into consideration + // for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + // pod labels will be ignored. The default value is empty. + // The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + // Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + // This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + // +listType=atomic + // +optional + MatchLabelKeys []string `json:"matchLabelKeys,omitempty" protobuf:"bytes,5,opt,name=matchLabelKeys"` + // MismatchLabelKeys is a set of pod label keys to select which pods will + // be taken into consideration. The keys are used to lookup values from the + // incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + // to select the group of existing pods which pods will be taken into consideration + // for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + // pod labels will be ignored. The default value is empty. + // The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + // Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + // This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + // +listType=atomic + // +optional + MismatchLabelKeys []string `json:"mismatchLabelKeys,omitempty" protobuf:"bytes,6,opt,name=mismatchLabelKeys"` } // Node affinity is a group of node affinity scheduling rules. @@ -4692,6 +4875,15 @@ type LoadBalancerIngress struct { // +optional Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"` + // IPMode specifies how the load-balancer IP behaves, and may only be specified when the ip field is specified. + // Setting this to "VIP" indicates that traffic is delivered to the node with + // the destination set to the load-balancer's IP and port. + // Setting this to "Proxy" indicates that traffic is delivered to the node or pod with + // the destination set to the node's IP and node port or the pod's IP and port. + // Service implementations may use this information to adjust traffic routing. 
+ // +optional + IPMode *LoadBalancerIPMode `json:"ipMode,omitempty" protobuf:"bytes,3,opt,name=ipMode"` + // Ports is a list of records of service ports // If used, every port defined in the service should have an entry in it // +listType=atomic @@ -4709,6 +4901,8 @@ const ( IPv4Protocol IPFamily = "IPv4" // IPv6Protocol indicates that this IP is IPv6 protocol IPv6Protocol IPFamily = "IPv6" + // IPFamilyUnknown indicates that this IP is unknown protocol + IPFamilyUnknown IPFamily = "" ) // IPFamilyPolicy represents the dual-stack-ness requested or required by a Service @@ -5003,7 +5197,7 @@ type ServicePort struct { // RFC-6335 and https://www.iana.org/assignments/service-names). // // * Kubernetes-defined prefixed names: - // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + // * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 // @@ -5247,7 +5441,7 @@ type EndpointPort struct { // RFC-6335 and https://www.iana.org/assignments/service-names). // // * Kubernetes-defined prefixed names: - // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + // * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 // @@ -7054,3 +7248,15 @@ type PortStatus struct { // +kubebuilder:validation:MaxLength=316 Error *string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"` } + +// LoadBalancerIPMode represents the mode of the LoadBalancer ingress IP +type LoadBalancerIPMode string + +const ( + // LoadBalancerIPModeVIP indicates that traffic is delivered to the node with + // the destination set to the load-balancer's IP and port. + LoadBalancerIPModeVIP LoadBalancerIPMode = "VIP" + // LoadBalancerIPModeProxy indicates that traffic is delivered to the node or pod with + // the destination set to the node's IP and port or the pod's IP and port. + LoadBalancerIPModeProxy LoadBalancerIPMode = "Proxy" +) diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go index 9734d8b41e..01152a0964 100644 --- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -127,7 +127,7 @@ var map_CSIPersistentVolumeSource = map[string]string{ "nodeStageSecretRef": "nodeStageSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeStageVolume and NodeStageVolume and NodeUnstageVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", "nodePublishSecretRef": "nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. 
If the secret object contains more than one secret, all secrets are passed.", "controllerExpandSecretRef": "controllerExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerExpandVolume call. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", - "nodeExpandSecretRef": "nodeExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeExpandVolume call. This is a beta field which is enabled default by CSINodeExpandSecret feature gate. This field is optional, may be omitted if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + "nodeExpandSecretRef": "nodeExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeExpandVolume call. This field is optional, may be omitted if no secret is required. If the secret object contains more than one secret, all secrets are passed.", } func (CSIPersistentVolumeSource) SwaggerDoc() map[string]string { @@ -228,6 +228,19 @@ func (ClientIPConfig) SwaggerDoc() map[string]string { return map_ClientIPConfig } +var map_ClusterTrustBundleProjection = map[string]string{ + "": "ClusterTrustBundleProjection describes how to select a set of ClusterTrustBundle objects and project their contents into the pod filesystem.", + "name": "Select a single ClusterTrustBundle by object name. Mutually-exclusive with signerName and labelSelector.", + "signerName": "Select all ClusterTrustBundles that match this signer name. Mutually-exclusive with name. The contents of all selected ClusterTrustBundles will be unified and deduplicated.", + "labelSelector": "Select all ClusterTrustBundles that match this label selector. Only has effect if signerName is set. Mutually-exclusive with name. If unset, interpreted as \"match nothing\". If set but empty, interpreted as \"match everything\".", + "optional": "If true, don't block pod startup if the referenced ClusterTrustBundle(s) aren't available. If using name, then the named ClusterTrustBundle is allowed not to exist. If using signerName, then the combination of signerName and labelSelector is allowed to match zero ClusterTrustBundles.", + "path": "Relative path from the volume root to write the bundle.", +} + +func (ClusterTrustBundleProjection) SwaggerDoc() map[string]string { + return map_ClusterTrustBundleProjection +} + var map_ComponentCondition = map[string]string{ "": "Information about the condition of a component.", "type": "Type of condition for a component. Valid value: \"Healthy\"", @@ -531,7 +544,7 @@ var map_EndpointPort = map[string]string{ "name": "The name of this port. This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined.", "port": "The port number of the endpoint.", "protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", - "appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. 
Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", + "appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", } func (EndpointPort) SwaggerDoc() map[string]string { @@ -935,6 +948,7 @@ var map_LifecycleHandler = map[string]string{ "exec": "Exec specifies the action to take.", "httpGet": "HTTPGet specifies the http request to perform.", "tcpSocket": "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified.", + "sleep": "Sleep represents the duration that the container should sleep before being terminated.", } func (LifecycleHandler) SwaggerDoc() map[string]string { @@ -988,6 +1002,7 @@ var map_LoadBalancerIngress = map[string]string{ "": "LoadBalancerIngress represents the status of a load-balancer ingress point: traffic intended for the service should be sent to an ingress point.", "ip": "IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)", "hostname": "Hostname is set for load-balancer ingress points that are DNS based (typically AWS load-balancers)", + "ipMode": "IPMode specifies how the load-balancer IP behaves, and may only be specified when the ip field is specified. Setting this to \"VIP\" indicates that traffic is delivered to the node with the destination set to the load-balancer's IP and port. Setting this to \"Proxy\" indicates that traffic is delivered to the node or pod with the destination set to the node's IP and node port or the pod's IP and port. 
Service implementations may use this information to adjust traffic routing.", "ports": "Ports is a list of records of service ports If used, every port defined in the service should have an entry in it", } @@ -1023,6 +1038,16 @@ func (LocalVolumeSource) SwaggerDoc() map[string]string { return map_LocalVolumeSource } +var map_ModifyVolumeStatus = map[string]string{ + "": "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation", + "targetVolumeAttributesClassName": "targetVolumeAttributesClassName is the name of the VolumeAttributesClass the PVC currently being reconciled", + "status": "status is the status of the ControllerModifyVolume operation. It can be in any of following states:\n - Pending\n Pending indicates that the PersistentVolumeClaim cannot be modified due to unmet requirements, such as\n the specified VolumeAttributesClass not existing.\n - InProgress\n InProgress indicates that the volume is being modified.\n - Infeasible\n Infeasible indicates that the request has been rejected as invalid by the CSI driver. To\n\t resolve the error, a valid VolumeAttributesClass needs to be specified.\nNote: New statuses can be added in the future. Consumers should check for unknown statuses and fail appropriately.", +} + +func (ModifyVolumeStatus) SwaggerDoc() map[string]string { + return map_ModifyVolumeStatus +} + var map_NFSVolumeSource = map[string]string{ "": "Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling.", "server": "server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", @@ -1339,15 +1364,16 @@ func (PersistentVolumeClaimList) SwaggerDoc() map[string]string { } var map_PersistentVolumeClaimSpec = map[string]string{ - "": "PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes", - "accessModes": "accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", - "selector": "selector is a label query over volumes to consider for binding.", - "resources": "resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources", - "volumeName": "volumeName is the binding reference to the PersistentVolume backing this claim.", - "storageClassName": "storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1", - "volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.", - "dataSource": "dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. 
When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.", - "dataSourceRef": "dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While dataSource ignores disallowed values (dropping them), dataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n* While dataSource only allows local objects, dataSourceRef allows objects\n in any namespaces.\n(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.", + "": "PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes", + "accessModes": "accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", + "selector": "selector is a label query over volumes to consider for binding.", + "resources": "resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources", + "volumeName": "volumeName is the binding reference to the PersistentVolume backing this claim.", + "storageClassName": "storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1", + "volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.", + "dataSource": "dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. 
When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.", + "dataSourceRef": "dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While dataSource ignores disallowed values (dropping them), dataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n* While dataSource only allows local objects, dataSourceRef allows objects\n in any namespaces.\n(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.", + "volumeAttributesClassName": "volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.", } func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string { @@ -1355,13 +1381,15 @@ func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string { } var map_PersistentVolumeClaimStatus = map[string]string{ - "": "PersistentVolumeClaimStatus is the current status of a persistent volume claim.", - "phase": "phase represents the current phase of PersistentVolumeClaim.", - "accessModes": "accessModes contains the actual access modes the volume backing the PVC has. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", - "capacity": "capacity represents the actual resources of the underlying volume.", - "conditions": "conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.", - "allocatedResources": "allocatedResources tracks the resources allocated to a PVC including its capacity. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nCapacity reported here may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity.\n\nA controller that receives PVC update with previously unknown resourceName should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", - "allocatedResourceStatuses": "allocatedResourceStatuses stores status of resource being resized for the given PVC. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nClaimResourceStatus can be in any of following states:\n\t- ControllerResizeInProgress:\n\t\tState set when resize controller starts resizing the volume in control-plane.\n\t- ControllerResizeFailed:\n\t\tState set when resize has failed in resize controller with a terminal error.\n\t- NodeResizePending:\n\t\tState set when resize controller has finished resizing the volume but further resizing of\n\t\tvolume is needed on the node.\n\t- NodeResizeInProgress:\n\t\tState set when kubelet starts resizing the volume.\n\t- NodeResizeFailed:\n\t\tState set when resizing has failed in kubelet with a terminal error. 
Transient errors don't set\n\t\tNodeResizeFailed.\nFor example: if expanding a PVC for more capacity - this field can be one of the following states:\n\t- pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeFailed\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizePending\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeFailed\"\nWhen this field is not set, it means that no resize operation is in progress for the given PVC.\n\nA controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", + "": "PersistentVolumeClaimStatus is the current status of a persistent volume claim.", + "phase": "phase represents the current phase of PersistentVolumeClaim.", + "accessModes": "accessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", + "capacity": "capacity represents the actual resources of the underlying volume.", + "conditions": "conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.", + "allocatedResources": "allocatedResources tracks the resources allocated to a PVC including its capacity. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nCapacity reported here may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity.\n\nA controller that receives PVC update with previously unknown resourceName should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", + "allocatedResourceStatuses": "allocatedResourceStatuses stores status of resource being resized for the given PVC. Key names follow standard Kubernetes label syntax. 
Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nClaimResourceStatus can be in any of following states:\n\t- ControllerResizeInProgress:\n\t\tState set when resize controller starts resizing the volume in control-plane.\n\t- ControllerResizeFailed:\n\t\tState set when resize has failed in resize controller with a terminal error.\n\t- NodeResizePending:\n\t\tState set when resize controller has finished resizing the volume but further resizing of\n\t\tvolume is needed on the node.\n\t- NodeResizeInProgress:\n\t\tState set when kubelet starts resizing the volume.\n\t- NodeResizeFailed:\n\t\tState set when resizing has failed in kubelet with a terminal error. Transient errors don't set\n\t\tNodeResizeFailed.\nFor example: if expanding a PVC for more capacity - this field can be one of the following states:\n\t- pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeFailed\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizePending\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeFailed\"\nWhen this field is not set, it means that no resize operation is in progress for the given PVC.\n\nA controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", + "currentVolumeAttributesClassName": "currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim This is an alpha field and requires enabling VolumeAttributesClass feature.", + "modifyVolumeStatus": "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. When this is unset, there is no ModifyVolume operation being attempted. This is an alpha field and requires enabling VolumeAttributesClass feature.", } func (PersistentVolumeClaimStatus) SwaggerDoc() map[string]string { @@ -1438,6 +1466,7 @@ var map_PersistentVolumeSpec = map[string]string{ "mountOptions": "mountOptions is the list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options", "volumeMode": "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec.", "nodeAffinity": "nodeAffinity defines constraints that limit what nodes this volume can be accessed from. This field influences the scheduling of pods that use this volume.", + "volumeAttributesClassName": "Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. 
This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process. This is an alpha field and requires enabling VolumeAttributesClass feature.", } func (PersistentVolumeSpec) SwaggerDoc() map[string]string { @@ -1489,10 +1518,12 @@ func (PodAffinity) SwaggerDoc() map[string]string { var map_PodAffinityTerm = map[string]string{ "": "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running", - "labelSelector": "A label query over a set of resources, in this case pods.", + "labelSelector": "A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods.", "namespaces": "namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\".", "topologyKey": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.", "namespaceSelector": "A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces.", + "matchLabelKeys": "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.", + "mismatchLabelKeys": "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. 
This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.", } func (PodAffinityTerm) SwaggerDoc() map[string]string { @@ -2325,7 +2356,7 @@ var map_ServicePort = map[string]string{ "": "ServicePort contains information on service's port.", "name": "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service.", "protocol": "The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP.", - "appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", + "appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", "port": "The port that will be exposed by this service.", "targetPort": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service", "nodePort": "The port on each node on which this service is exposed when type is NodePort or LoadBalancer. Usually assigned by the system. If a value is specified, in-range, and not in use it will be used, otherwise the operation will fail. If not specified, a port will be allocated if this Service requires one. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type from NodePort to ClusterIP). 
More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport", @@ -2390,6 +2421,15 @@ func (SessionAffinityConfig) SwaggerDoc() map[string]string { return map_SessionAffinityConfig } +var map_SleepAction = map[string]string{ + "": "SleepAction describes a \"sleep\" action.", + "seconds": "Seconds is the number of seconds to sleep.", +} + +func (SleepAction) SwaggerDoc() map[string]string { + return map_SleepAction +} + var map_StorageOSPersistentVolumeSource = map[string]string{ "": "Represents a StorageOS persistent volume resource.", "volumeName": "volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.", @@ -2566,12 +2606,23 @@ var map_VolumeProjection = map[string]string{ "downwardAPI": "downwardAPI information about the downwardAPI data to project", "configMap": "configMap information about the configMap data to project", "serviceAccountToken": "serviceAccountToken is information about the serviceAccountToken data to project", + "clusterTrustBundle": "ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field of ClusterTrustBundle objects in an auto-updating file.\n\nAlpha, gated by the ClusterTrustBundleProjection feature gate.\n\nClusterTrustBundle objects can either be selected by name, or by the combination of signer name and a label selector.\n\nKubelet performs aggressive normalization of the PEM contents written into the pod filesystem. Esoteric PEM features such as inter-block comments and block headers are stripped. Certificates are deduplicated. The ordering of certificates within the file is arbitrary, and Kubelet may change the order over time.", } func (VolumeProjection) SwaggerDoc() map[string]string { return map_VolumeProjection } +var map_VolumeResourceRequirements = map[string]string{ + "": "VolumeResourceRequirements describes the storage resource requirements for a volume.", + "limits": "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + "requests": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", +} + +func (VolumeResourceRequirements) SwaggerDoc() map[string]string { + return map_VolumeResourceRequirements +} + var map_VolumeSource = map[string]string{ "": "Represents the source of a volume to mount. Only one of its members may be specified.", "hostPath": "hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", diff --git a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go index d76f0bbbcf..45172e0e23 100644 --- a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go @@ -466,6 +466,42 @@ func (in *ClientIPConfig) DeepCopy() *ClientIPConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterTrustBundleProjection) DeepCopyInto(out *ClusterTrustBundleProjection) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SignerName != nil { + in, out := &in.SignerName, &out.SignerName + *out = new(string) + **out = **in + } + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTrustBundleProjection. +func (in *ClusterTrustBundleProjection) DeepCopy() *ClusterTrustBundleProjection { + if in == nil { + return nil + } + out := new(ClusterTrustBundleProjection) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ComponentCondition) DeepCopyInto(out *ComponentCondition) { *out = *in @@ -2045,6 +2081,11 @@ func (in *LifecycleHandler) DeepCopyInto(out *LifecycleHandler) { *out = new(TCPSocketAction) **out = **in } + if in.Sleep != nil { + in, out := &in.Sleep, &out.Sleep + *out = new(SleepAction) + **out = **in + } return } @@ -2228,6 +2269,11 @@ func (in *List) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LoadBalancerIngress) DeepCopyInto(out *LoadBalancerIngress) { *out = *in + if in.IPMode != nil { + in, out := &in.IPMode, &out.IPMode + *out = new(LoadBalancerIPMode) + **out = **in + } if in.Ports != nil { in, out := &in.Ports, &out.Ports *out = make([]PortStatus, len(*in)) @@ -2308,6 +2354,22 @@ func (in *LocalVolumeSource) DeepCopy() *LocalVolumeSource { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ModifyVolumeStatus) DeepCopyInto(out *ModifyVolumeStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModifyVolumeStatus. +func (in *ModifyVolumeStatus) DeepCopy() *ModifyVolumeStatus { + if in == nil { + return nil + } + out := new(ModifyVolumeStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NFSVolumeSource) DeepCopyInto(out *NFSVolumeSource) { *out = *in @@ -3056,6 +3118,11 @@ func (in *PersistentVolumeClaimSpec) DeepCopyInto(out *PersistentVolumeClaimSpec *out = new(TypedObjectReference) (*in).DeepCopyInto(*out) } + if in.VolumeAttributesClassName != nil { + in, out := &in.VolumeAttributesClassName, &out.VolumeAttributesClassName + *out = new(string) + **out = **in + } return } @@ -3105,6 +3172,16 @@ func (in *PersistentVolumeClaimStatus) DeepCopyInto(out *PersistentVolumeClaimSt (*out)[key] = val } } + if in.CurrentVolumeAttributesClassName != nil { + in, out := &in.CurrentVolumeAttributesClassName, &out.CurrentVolumeAttributesClassName + *out = new(string) + **out = **in + } + if in.ModifyVolumeStatus != nil { + in, out := &in.ModifyVolumeStatus, &out.ModifyVolumeStatus + *out = new(ModifyVolumeStatus) + **out = **in + } return } @@ -3347,6 +3424,11 @@ func (in *PersistentVolumeSpec) DeepCopyInto(out *PersistentVolumeSpec) { *out = new(VolumeNodeAffinity) (*in).DeepCopyInto(*out) } + if in.VolumeAttributesClassName != nil { + in, out := &in.VolumeAttributesClassName, &out.VolumeAttributesClassName + *out = new(string) + **out = **in + } return } @@ -3472,6 +3554,16 @@ func (in *PodAffinityTerm) DeepCopyInto(out *PodAffinityTerm) { *out = new(metav1.LabelSelector) (*in).DeepCopyInto(*out) } + if in.MatchLabelKeys != nil { + in, out := &in.MatchLabelKeys, &out.MatchLabelKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.MismatchLabelKeys != nil { + in, out := &in.MismatchLabelKeys, &out.MismatchLabelKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } return } @@ -5681,6 +5773,22 @@ func (in *SessionAffinityConfig) DeepCopy() *SessionAffinityConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SleepAction) DeepCopyInto(out *SleepAction) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SleepAction. +func (in *SleepAction) DeepCopy() *SleepAction { + if in == nil { + return nil + } + out := new(SleepAction) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StorageOSPersistentVolumeSource) DeepCopyInto(out *StorageOSPersistentVolumeSource) { *out = *in @@ -6027,6 +6135,11 @@ func (in *VolumeProjection) DeepCopyInto(out *VolumeProjection) { *out = new(ServiceAccountTokenProjection) (*in).DeepCopyInto(*out) } + if in.ClusterTrustBundle != nil { + in, out := &in.ClusterTrustBundle, &out.ClusterTrustBundle + *out = new(ClusterTrustBundleProjection) + (*in).DeepCopyInto(*out) + } return } @@ -6040,6 +6153,36 @@ func (in *VolumeProjection) DeepCopy() *VolumeProjection { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeResourceRequirements) DeepCopyInto(out *VolumeResourceRequirements) { + *out = *in + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeResourceRequirements. 
+func (in *VolumeResourceRequirements) DeepCopy() *VolumeResourceRequirements { + if in == nil { + return nil + } + out := new(VolumeResourceRequirements) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VolumeSource) DeepCopyInto(out *VolumeSource) { *out = *in diff --git a/vendor/k8s.io/api/discovery/v1/generated.proto b/vendor/k8s.io/api/discovery/v1/generated.proto index 490ce89224..6d234017b7 100644 --- a/vendor/k8s.io/api/discovery/v1/generated.proto +++ b/vendor/k8s.io/api/discovery/v1/generated.proto @@ -118,7 +118,7 @@ message EndpointHints { // +structType=atomic message EndpointPort { // name represents the name of this port. All ports in an EndpointSlice must have a unique name. - // If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. + // If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. // Name must either be an empty string or pass DNS_LABEL validation: // * must be no more than 63 characters long. // * must consist of lower case alphanumeric characters or '-'. @@ -145,7 +145,7 @@ message EndpointPort { // RFC-6335 and https://www.iana.org/assignments/service-names). // // * Kubernetes-defined prefixed names: - // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + // * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 // diff --git a/vendor/k8s.io/api/discovery/v1/types.go b/vendor/k8s.io/api/discovery/v1/types.go index efbb09918c..7ebb07ca35 100644 --- a/vendor/k8s.io/api/discovery/v1/types.go +++ b/vendor/k8s.io/api/discovery/v1/types.go @@ -168,7 +168,7 @@ type ForZone struct { // +structType=atomic type EndpointPort struct { // name represents the name of this port. All ports in an EndpointSlice must have a unique name. - // If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. + // If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. // Name must either be an empty string or pass DNS_LABEL validation: // * must be no more than 63 characters long. // * must consist of lower case alphanumeric characters or '-'. @@ -195,7 +195,7 @@ type EndpointPort struct { // RFC-6335 and https://www.iana.org/assignments/service-names). 
// // * Kubernetes-defined prefixed names: - // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + // * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 // diff --git a/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go index bef7745398..41c3060568 100644 --- a/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go @@ -65,10 +65,10 @@ func (EndpointHints) SwaggerDoc() map[string]string { var map_EndpointPort = map[string]string{ "": "EndpointPort represents a Port used by an EndpointSlice", - "name": "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", + "name": "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", "protocol": "protocol represents the IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", "port": "port represents the port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.", - "appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", + "appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. 
Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", } func (EndpointPort) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/discovery/v1beta1/generated.proto b/vendor/k8s.io/api/discovery/v1beta1/generated.proto index 8b6c360b0e..ec555a40b3 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/generated.proto +++ b/vendor/k8s.io/api/discovery/v1beta1/generated.proto @@ -119,7 +119,7 @@ message EndpointHints { // EndpointPort represents a Port used by an EndpointSlice message EndpointPort { // name represents the name of this port. All ports in an EndpointSlice must have a unique name. - // If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. + // If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. // Name must either be an empty string or pass DNS_LABEL validation: // * must be no more than 63 characters long. // * must consist of lower case alphanumeric characters or '-'. diff --git a/vendor/k8s.io/api/discovery/v1beta1/types.go b/vendor/k8s.io/api/discovery/v1beta1/types.go index f09f7f320c..defd8e2ce6 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/types.go +++ b/vendor/k8s.io/api/discovery/v1beta1/types.go @@ -172,7 +172,7 @@ type ForZone struct { // EndpointPort represents a Port used by an EndpointSlice type EndpointPort struct { // name represents the name of this port. All ports in an EndpointSlice must have a unique name. - // If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. + // If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. // Name must either be an empty string or pass DNS_LABEL validation: // * must be no more than 63 characters long. // * must consist of lower case alphanumeric characters or '-'. diff --git a/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go index b1d4c306cc..847d4d58e0 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go @@ -64,7 +64,7 @@ func (EndpointHints) SwaggerDoc() map[string]string { var map_EndpointPort = map[string]string{ "": "EndpointPort represents a Port used by an EndpointSlice", - "name": "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", + "name": "name represents the name of this port. 
All ports in an EndpointSlice must have a unique name. If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", "protocol": "protocol represents the IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", "port": "port represents the port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.", "appProtocol": "appProtocol represents the application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.", diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/doc.go b/vendor/k8s.io/api/flowcontrol/v1/doc.go similarity index 73% rename from vendor/k8s.io/api/flowcontrol/v1alpha1/doc.go rename to vendor/k8s.io/api/flowcontrol/v1/doc.go index a3d4d0c607..1bc51d4066 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/doc.go +++ b/vendor/k8s.io/api/flowcontrol/v1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Kubernetes Authors. +Copyright 2023 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,9 +17,8 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true -// +k8s:prerelease-lifecycle-gen=true // +groupName=flowcontrol.apiserver.k8s.io -// Package v1alpha1 holds api types of version v1alpha1 for group "flowcontrol.apiserver.k8s.io". -package v1alpha1 // import "k8s.io/api/flowcontrol/v1alpha1" +// Package v1 holds api types of version v1 for group "flowcontrol.apiserver.k8s.io". +package v1 // import "k8s.io/api/flowcontrol/v1" diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.pb.go b/vendor/k8s.io/api/flowcontrol/v1/generated.pb.go similarity index 91% rename from vendor/k8s.io/api/flowcontrol/v1alpha1/generated.pb.go rename to vendor/k8s.io/api/flowcontrol/v1/generated.pb.go index b54e1ceefb..c235ba10de 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/flowcontrol/v1/generated.pb.go @@ -15,9 +15,9 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto +// source: k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1/generated.proto -package v1alpha1 +package v1 import ( fmt "fmt" @@ -46,7 +46,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *ExemptPriorityLevelConfiguration) Reset() { *m = ExemptPriorityLevelConfiguration{} } func (*ExemptPriorityLevelConfiguration) ProtoMessage() {} func (*ExemptPriorityLevelConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{0} + return fileDescriptor_f8a25df358697d27, []int{0} } func (m *ExemptPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -74,7 +74,7 @@ var xxx_messageInfo_ExemptPriorityLevelConfiguration proto.InternalMessageInfo func (m *FlowDistinguisherMethod) Reset() { *m = FlowDistinguisherMethod{} } func (*FlowDistinguisherMethod) ProtoMessage() {} func (*FlowDistinguisherMethod) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{1} + return fileDescriptor_f8a25df358697d27, []int{1} } func (m *FlowDistinguisherMethod) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -102,7 +102,7 @@ var xxx_messageInfo_FlowDistinguisherMethod proto.InternalMessageInfo func (m *FlowSchema) Reset() { *m = FlowSchema{} } func (*FlowSchema) ProtoMessage() {} func (*FlowSchema) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{2} + return fileDescriptor_f8a25df358697d27, []int{2} } func (m *FlowSchema) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -130,7 +130,7 @@ var xxx_messageInfo_FlowSchema proto.InternalMessageInfo func (m *FlowSchemaCondition) Reset() { *m = FlowSchemaCondition{} } func (*FlowSchemaCondition) ProtoMessage() {} func (*FlowSchemaCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{3} + return fileDescriptor_f8a25df358697d27, []int{3} } func (m *FlowSchemaCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -158,7 +158,7 @@ var xxx_messageInfo_FlowSchemaCondition proto.InternalMessageInfo func (m *FlowSchemaList) Reset() { *m = FlowSchemaList{} } func (*FlowSchemaList) ProtoMessage() {} func (*FlowSchemaList) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{4} + return fileDescriptor_f8a25df358697d27, []int{4} } func (m *FlowSchemaList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -186,7 +186,7 @@ var xxx_messageInfo_FlowSchemaList proto.InternalMessageInfo func (m *FlowSchemaSpec) Reset() { *m = FlowSchemaSpec{} } func (*FlowSchemaSpec) ProtoMessage() {} func (*FlowSchemaSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{5} + return fileDescriptor_f8a25df358697d27, []int{5} } func (m *FlowSchemaSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -214,7 +214,7 @@ var xxx_messageInfo_FlowSchemaSpec proto.InternalMessageInfo func (m *FlowSchemaStatus) Reset() { *m = FlowSchemaStatus{} } func (*FlowSchemaStatus) ProtoMessage() {} func (*FlowSchemaStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{6} + return fileDescriptor_f8a25df358697d27, []int{6} } func (m *FlowSchemaStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -242,7 +242,7 @@ var xxx_messageInfo_FlowSchemaStatus proto.InternalMessageInfo func (m *GroupSubject) Reset() { *m = GroupSubject{} } func (*GroupSubject) ProtoMessage() {} func (*GroupSubject) Descriptor() ([]byte, 
[]int) { - return fileDescriptor_45ba024d525b289b, []int{7} + return fileDescriptor_f8a25df358697d27, []int{7} } func (m *GroupSubject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -270,7 +270,7 @@ var xxx_messageInfo_GroupSubject proto.InternalMessageInfo func (m *LimitResponse) Reset() { *m = LimitResponse{} } func (*LimitResponse) ProtoMessage() {} func (*LimitResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{8} + return fileDescriptor_f8a25df358697d27, []int{8} } func (m *LimitResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -298,7 +298,7 @@ var xxx_messageInfo_LimitResponse proto.InternalMessageInfo func (m *LimitedPriorityLevelConfiguration) Reset() { *m = LimitedPriorityLevelConfiguration{} } func (*LimitedPriorityLevelConfiguration) ProtoMessage() {} func (*LimitedPriorityLevelConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{9} + return fileDescriptor_f8a25df358697d27, []int{9} } func (m *LimitedPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -326,7 +326,7 @@ var xxx_messageInfo_LimitedPriorityLevelConfiguration proto.InternalMessageInfo func (m *NonResourcePolicyRule) Reset() { *m = NonResourcePolicyRule{} } func (*NonResourcePolicyRule) ProtoMessage() {} func (*NonResourcePolicyRule) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{10} + return fileDescriptor_f8a25df358697d27, []int{10} } func (m *NonResourcePolicyRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -354,7 +354,7 @@ var xxx_messageInfo_NonResourcePolicyRule proto.InternalMessageInfo func (m *PolicyRulesWithSubjects) Reset() { *m = PolicyRulesWithSubjects{} } func (*PolicyRulesWithSubjects) ProtoMessage() {} func (*PolicyRulesWithSubjects) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{11} + return fileDescriptor_f8a25df358697d27, []int{11} } func (m *PolicyRulesWithSubjects) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -382,7 +382,7 @@ var xxx_messageInfo_PolicyRulesWithSubjects proto.InternalMessageInfo func (m *PriorityLevelConfiguration) Reset() { *m = PriorityLevelConfiguration{} } func (*PriorityLevelConfiguration) ProtoMessage() {} func (*PriorityLevelConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{12} + return fileDescriptor_f8a25df358697d27, []int{12} } func (m *PriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -410,7 +410,7 @@ var xxx_messageInfo_PriorityLevelConfiguration proto.InternalMessageInfo func (m *PriorityLevelConfigurationCondition) Reset() { *m = PriorityLevelConfigurationCondition{} } func (*PriorityLevelConfigurationCondition) ProtoMessage() {} func (*PriorityLevelConfigurationCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{13} + return fileDescriptor_f8a25df358697d27, []int{13} } func (m *PriorityLevelConfigurationCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -438,7 +438,7 @@ var xxx_messageInfo_PriorityLevelConfigurationCondition proto.InternalMessageInf func (m *PriorityLevelConfigurationList) Reset() { *m = PriorityLevelConfigurationList{} } func (*PriorityLevelConfigurationList) ProtoMessage() {} func (*PriorityLevelConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{14} + return fileDescriptor_f8a25df358697d27, []int{14} } func (m *PriorityLevelConfigurationList) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -466,7 +466,7 @@ var xxx_messageInfo_PriorityLevelConfigurationList proto.InternalMessageInfo func (m *PriorityLevelConfigurationReference) Reset() { *m = PriorityLevelConfigurationReference{} } func (*PriorityLevelConfigurationReference) ProtoMessage() {} func (*PriorityLevelConfigurationReference) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{15} + return fileDescriptor_f8a25df358697d27, []int{15} } func (m *PriorityLevelConfigurationReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -494,7 +494,7 @@ var xxx_messageInfo_PriorityLevelConfigurationReference proto.InternalMessageInf func (m *PriorityLevelConfigurationSpec) Reset() { *m = PriorityLevelConfigurationSpec{} } func (*PriorityLevelConfigurationSpec) ProtoMessage() {} func (*PriorityLevelConfigurationSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{16} + return fileDescriptor_f8a25df358697d27, []int{16} } func (m *PriorityLevelConfigurationSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -522,7 +522,7 @@ var xxx_messageInfo_PriorityLevelConfigurationSpec proto.InternalMessageInfo func (m *PriorityLevelConfigurationStatus) Reset() { *m = PriorityLevelConfigurationStatus{} } func (*PriorityLevelConfigurationStatus) ProtoMessage() {} func (*PriorityLevelConfigurationStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{17} + return fileDescriptor_f8a25df358697d27, []int{17} } func (m *PriorityLevelConfigurationStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -550,7 +550,7 @@ var xxx_messageInfo_PriorityLevelConfigurationStatus proto.InternalMessageInfo func (m *QueuingConfiguration) Reset() { *m = QueuingConfiguration{} } func (*QueuingConfiguration) ProtoMessage() {} func (*QueuingConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{18} + return fileDescriptor_f8a25df358697d27, []int{18} } func (m *QueuingConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -578,7 +578,7 @@ var xxx_messageInfo_QueuingConfiguration proto.InternalMessageInfo func (m *ResourcePolicyRule) Reset() { *m = ResourcePolicyRule{} } func (*ResourcePolicyRule) ProtoMessage() {} func (*ResourcePolicyRule) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{19} + return fileDescriptor_f8a25df358697d27, []int{19} } func (m *ResourcePolicyRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -606,7 +606,7 @@ var xxx_messageInfo_ResourcePolicyRule proto.InternalMessageInfo func (m *ServiceAccountSubject) Reset() { *m = ServiceAccountSubject{} } func (*ServiceAccountSubject) ProtoMessage() {} func (*ServiceAccountSubject) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{20} + return fileDescriptor_f8a25df358697d27, []int{20} } func (m *ServiceAccountSubject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -634,7 +634,7 @@ var xxx_messageInfo_ServiceAccountSubject proto.InternalMessageInfo func (m *Subject) Reset() { *m = Subject{} } func (*Subject) ProtoMessage() {} func (*Subject) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{21} + return fileDescriptor_f8a25df358697d27, []int{21} } func (m *Subject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -662,7 +662,7 @@ var xxx_messageInfo_Subject proto.InternalMessageInfo func (m *UserSubject) Reset() { *m = UserSubject{} } func (*UserSubject) 
ProtoMessage() {} func (*UserSubject) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{22} + return fileDescriptor_f8a25df358697d27, []int{22} } func (m *UserSubject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -688,139 +688,137 @@ func (m *UserSubject) XXX_DiscardUnknown() { var xxx_messageInfo_UserSubject proto.InternalMessageInfo func init() { - proto.RegisterType((*ExemptPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1alpha1.ExemptPriorityLevelConfiguration") - proto.RegisterType((*FlowDistinguisherMethod)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowDistinguisherMethod") - proto.RegisterType((*FlowSchema)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowSchema") - proto.RegisterType((*FlowSchemaCondition)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowSchemaCondition") - proto.RegisterType((*FlowSchemaList)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowSchemaList") - proto.RegisterType((*FlowSchemaSpec)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowSchemaSpec") - proto.RegisterType((*FlowSchemaStatus)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowSchemaStatus") - proto.RegisterType((*GroupSubject)(nil), "k8s.io.api.flowcontrol.v1alpha1.GroupSubject") - proto.RegisterType((*LimitResponse)(nil), "k8s.io.api.flowcontrol.v1alpha1.LimitResponse") - proto.RegisterType((*LimitedPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1alpha1.LimitedPriorityLevelConfiguration") - proto.RegisterType((*NonResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1alpha1.NonResourcePolicyRule") - proto.RegisterType((*PolicyRulesWithSubjects)(nil), "k8s.io.api.flowcontrol.v1alpha1.PolicyRulesWithSubjects") - proto.RegisterType((*PriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1alpha1.PriorityLevelConfiguration") - proto.RegisterType((*PriorityLevelConfigurationCondition)(nil), "k8s.io.api.flowcontrol.v1alpha1.PriorityLevelConfigurationCondition") - proto.RegisterType((*PriorityLevelConfigurationList)(nil), "k8s.io.api.flowcontrol.v1alpha1.PriorityLevelConfigurationList") - proto.RegisterType((*PriorityLevelConfigurationReference)(nil), "k8s.io.api.flowcontrol.v1alpha1.PriorityLevelConfigurationReference") - proto.RegisterType((*PriorityLevelConfigurationSpec)(nil), "k8s.io.api.flowcontrol.v1alpha1.PriorityLevelConfigurationSpec") - proto.RegisterType((*PriorityLevelConfigurationStatus)(nil), "k8s.io.api.flowcontrol.v1alpha1.PriorityLevelConfigurationStatus") - proto.RegisterType((*QueuingConfiguration)(nil), "k8s.io.api.flowcontrol.v1alpha1.QueuingConfiguration") - proto.RegisterType((*ResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1alpha1.ResourcePolicyRule") - proto.RegisterType((*ServiceAccountSubject)(nil), "k8s.io.api.flowcontrol.v1alpha1.ServiceAccountSubject") - proto.RegisterType((*Subject)(nil), "k8s.io.api.flowcontrol.v1alpha1.Subject") - proto.RegisterType((*UserSubject)(nil), "k8s.io.api.flowcontrol.v1alpha1.UserSubject") + proto.RegisterType((*ExemptPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1.ExemptPriorityLevelConfiguration") + proto.RegisterType((*FlowDistinguisherMethod)(nil), "k8s.io.api.flowcontrol.v1.FlowDistinguisherMethod") + proto.RegisterType((*FlowSchema)(nil), "k8s.io.api.flowcontrol.v1.FlowSchema") + proto.RegisterType((*FlowSchemaCondition)(nil), "k8s.io.api.flowcontrol.v1.FlowSchemaCondition") + proto.RegisterType((*FlowSchemaList)(nil), "k8s.io.api.flowcontrol.v1.FlowSchemaList") + proto.RegisterType((*FlowSchemaSpec)(nil), "k8s.io.api.flowcontrol.v1.FlowSchemaSpec") + 
proto.RegisterType((*FlowSchemaStatus)(nil), "k8s.io.api.flowcontrol.v1.FlowSchemaStatus") + proto.RegisterType((*GroupSubject)(nil), "k8s.io.api.flowcontrol.v1.GroupSubject") + proto.RegisterType((*LimitResponse)(nil), "k8s.io.api.flowcontrol.v1.LimitResponse") + proto.RegisterType((*LimitedPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1.LimitedPriorityLevelConfiguration") + proto.RegisterType((*NonResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1.NonResourcePolicyRule") + proto.RegisterType((*PolicyRulesWithSubjects)(nil), "k8s.io.api.flowcontrol.v1.PolicyRulesWithSubjects") + proto.RegisterType((*PriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1.PriorityLevelConfiguration") + proto.RegisterType((*PriorityLevelConfigurationCondition)(nil), "k8s.io.api.flowcontrol.v1.PriorityLevelConfigurationCondition") + proto.RegisterType((*PriorityLevelConfigurationList)(nil), "k8s.io.api.flowcontrol.v1.PriorityLevelConfigurationList") + proto.RegisterType((*PriorityLevelConfigurationReference)(nil), "k8s.io.api.flowcontrol.v1.PriorityLevelConfigurationReference") + proto.RegisterType((*PriorityLevelConfigurationSpec)(nil), "k8s.io.api.flowcontrol.v1.PriorityLevelConfigurationSpec") + proto.RegisterType((*PriorityLevelConfigurationStatus)(nil), "k8s.io.api.flowcontrol.v1.PriorityLevelConfigurationStatus") + proto.RegisterType((*QueuingConfiguration)(nil), "k8s.io.api.flowcontrol.v1.QueuingConfiguration") + proto.RegisterType((*ResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1.ResourcePolicyRule") + proto.RegisterType((*ServiceAccountSubject)(nil), "k8s.io.api.flowcontrol.v1.ServiceAccountSubject") + proto.RegisterType((*Subject)(nil), "k8s.io.api.flowcontrol.v1.Subject") + proto.RegisterType((*UserSubject)(nil), "k8s.io.api.flowcontrol.v1.UserSubject") } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto", fileDescriptor_45ba024d525b289b) -} - -var fileDescriptor_45ba024d525b289b = []byte{ - // 1621 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x4d, 0x6f, 0xdb, 0x46, - 0x1a, 0x36, 0x65, 0xc9, 0xb6, 0xc6, 0x9f, 0x19, 0xc7, 0xb0, 0xd6, 0x59, 0x48, 0x0e, 0x17, 0xd8, - 0x64, 0x37, 0x09, 0x15, 0x67, 0x93, 0x6c, 0x16, 0xc1, 0x22, 0x30, 0x93, 0x6c, 0xbe, 0x6c, 0xc7, - 0x1e, 0x27, 0xd9, 0x36, 0x48, 0x81, 0xd0, 0xd4, 0x58, 0x9a, 0x58, 0x22, 0xd9, 0x19, 0x52, 0x8e, - 0x8b, 0x1c, 0x0a, 0xf4, 0x0f, 0xf4, 0x07, 0xe4, 0xd8, 0x43, 0x6f, 0x05, 0x7a, 0xed, 0xa5, 0xc7, - 0xa0, 0xe8, 0x21, 0xc7, 0x9c, 0x84, 0x58, 0xbd, 0xf6, 0x07, 0xb4, 0x39, 0x14, 0xc5, 0x0c, 0x87, - 0xa4, 0x28, 0x89, 0xa2, 0x52, 0x03, 0x39, 0xf5, 0x66, 0xbe, 0x1f, 0xcf, 0x3b, 0xf3, 0xce, 0xfb, - 0xf1, 0xc8, 0xe0, 0xf6, 0xde, 0x15, 0xa6, 0x11, 0xbb, 0xbc, 0xe7, 0xed, 0x60, 0x6a, 0x61, 0x17, - 0xb3, 0x72, 0x13, 0x5b, 0x15, 0x9b, 0x96, 0xa5, 0xc2, 0x70, 0x48, 0x79, 0xb7, 0x6e, 0xef, 0x9b, - 0xb6, 0xe5, 0x52, 0xbb, 0x5e, 0x6e, 0xae, 0x18, 0x75, 0xa7, 0x66, 0xac, 0x94, 0xab, 0xd8, 0xc2, - 0xd4, 0x70, 0x71, 0x45, 0x73, 0xa8, 0xed, 0xda, 0xb0, 0xe4, 0x3b, 0x68, 0x86, 0x43, 0xb4, 0x0e, - 0x07, 0x2d, 0x70, 0x58, 0x3a, 0x57, 0x25, 0x6e, 0xcd, 0xdb, 0xd1, 0x4c, 0xbb, 0x51, 0xae, 0xda, - 0x55, 0xbb, 0x2c, 0xfc, 0x76, 0xbc, 0x5d, 0xf1, 0x25, 0x3e, 0xc4, 0x5f, 0x3e, 0xde, 0xd2, 0xc5, - 0xe8, 0x00, 0x0d, 0xc3, 0xac, 0x11, 0x0b, 0xd3, 0x83, 0xb2, 0xb3, 0x57, 0xe5, 0x02, 0x56, 0x6e, - 0x60, 0xd7, 0x28, 0x37, 0x7b, 0x4e, 0xb1, 0x54, 0x4e, 0xf2, 0xa2, 0x9e, 0xe5, 0x92, 0x06, 0xee, - 0x71, 0xb8, 0x9c, 0xe6, 0xc0, 
0xcc, 0x1a, 0x6e, 0x18, 0xdd, 0x7e, 0xea, 0x77, 0x0a, 0x58, 0xbe, - 0xf9, 0x1c, 0x37, 0x1c, 0x77, 0x93, 0x12, 0x9b, 0x12, 0xf7, 0x60, 0x0d, 0x37, 0x71, 0xfd, 0xba, - 0x6d, 0xed, 0x92, 0xaa, 0x47, 0x0d, 0x97, 0xd8, 0x16, 0xfc, 0x08, 0x14, 0x2c, 0xbb, 0x41, 0x2c, - 0x83, 0xcb, 0x4d, 0x8f, 0x52, 0x6c, 0x99, 0x07, 0xdb, 0x35, 0x83, 0x62, 0x56, 0x50, 0x96, 0x95, - 0xd3, 0x39, 0xfd, 0xaf, 0xed, 0x56, 0xa9, 0xb0, 0x91, 0x60, 0x83, 0x12, 0xbd, 0xe1, 0x7f, 0xc1, - 0x6c, 0x1d, 0x5b, 0x15, 0x63, 0xa7, 0x8e, 0x37, 0x31, 0x35, 0xb1, 0xe5, 0x16, 0x32, 0x02, 0x70, - 0xbe, 0xdd, 0x2a, 0xcd, 0xae, 0xc5, 0x55, 0xa8, 0xdb, 0x56, 0x7d, 0x0c, 0x16, 0xff, 0x57, 0xb7, - 0xf7, 0x6f, 0x10, 0xe6, 0x12, 0xab, 0xea, 0x11, 0x56, 0xc3, 0x74, 0x1d, 0xbb, 0x35, 0xbb, 0x02, - 0xaf, 0x81, 0xac, 0x7b, 0xe0, 0x60, 0x71, 0xbe, 0xbc, 0x7e, 0xe6, 0x55, 0xab, 0x34, 0xd2, 0x6e, - 0x95, 0xb2, 0x0f, 0x0e, 0x1c, 0xfc, 0xae, 0x55, 0x3a, 0x91, 0xe0, 0xc6, 0xd5, 0x48, 0x38, 0xaa, - 0x2f, 0x33, 0x00, 0x70, 0xab, 0x6d, 0x91, 0x38, 0xf8, 0x14, 0x4c, 0xf0, 0xc7, 0xaa, 0x18, 0xae, - 0x21, 0x30, 0x27, 0x2f, 0x9c, 0xd7, 0xa2, 0x52, 0x09, 0x73, 0xae, 0x39, 0x7b, 0x55, 0x2e, 0x60, - 0x1a, 0xb7, 0xd6, 0x9a, 0x2b, 0xda, 0xfd, 0x9d, 0x67, 0xd8, 0x74, 0xd7, 0xb1, 0x6b, 0xe8, 0x50, - 0x9e, 0x02, 0x44, 0x32, 0x14, 0xa2, 0xc2, 0x2d, 0x90, 0x65, 0x0e, 0x36, 0x45, 0x02, 0x26, 0x2f, - 0x94, 0xb5, 0x94, 0x42, 0xd4, 0xa2, 0xc3, 0x6d, 0x3b, 0xd8, 0xd4, 0xa7, 0x82, 0x2b, 0xf2, 0x2f, - 0x24, 0xa0, 0xe0, 0xc7, 0x60, 0x8c, 0xb9, 0x86, 0xeb, 0xb1, 0xc2, 0xa8, 0x00, 0x5d, 0x79, 0x1f, - 0x50, 0xe1, 0xa8, 0xcf, 0x48, 0xd8, 0x31, 0xff, 0x1b, 0x49, 0x40, 0xf5, 0x4d, 0x06, 0xcc, 0x47, - 0xc6, 0xd7, 0x6d, 0xab, 0x42, 0x44, 0xad, 0x5c, 0x8d, 0xe5, 0xfd, 0x54, 0x57, 0xde, 0x17, 0xfb, - 0xb8, 0x44, 0x39, 0x87, 0xff, 0x09, 0xcf, 0x9b, 0x11, 0xee, 0x27, 0xe3, 0xc1, 0xdf, 0xb5, 0x4a, - 0xb3, 0xa1, 0x5b, 0xfc, 0x3c, 0xb0, 0x09, 0x60, 0xdd, 0x60, 0xee, 0x03, 0x6a, 0x58, 0xcc, 0x87, - 0x25, 0x0d, 0x2c, 0xaf, 0xfd, 0xcf, 0xe1, 0x5e, 0x8a, 0x7b, 0xe8, 0x4b, 0x32, 0x24, 0x5c, 0xeb, - 0x41, 0x43, 0x7d, 0x22, 0xc0, 0xbf, 0x83, 0x31, 0x8a, 0x0d, 0x66, 0x5b, 0x85, 0xac, 0x38, 0x72, - 0x98, 0x2f, 0x24, 0xa4, 0x48, 0x6a, 0xe1, 0x3f, 0xc0, 0x78, 0x03, 0x33, 0x66, 0x54, 0x71, 0x21, - 0x27, 0x0c, 0x67, 0xa5, 0xe1, 0xf8, 0xba, 0x2f, 0x46, 0x81, 0x5e, 0xfd, 0x5e, 0x01, 0x33, 0x51, - 0x9e, 0xd6, 0x08, 0x73, 0xe1, 0x93, 0x9e, 0xea, 0xd3, 0x86, 0xbb, 0x13, 0xf7, 0x16, 0xb5, 0x37, - 0x27, 0xc3, 0x4d, 0x04, 0x92, 0x8e, 0xca, 0xdb, 0x04, 0x39, 0xe2, 0xe2, 0x06, 0xcf, 0xfa, 0xe8, - 0xe9, 0xc9, 0x0b, 0x67, 0xde, 0xa3, 0x4a, 0xf4, 0x69, 0x89, 0x9b, 0xbb, 0xc3, 0x11, 0x90, 0x0f, - 0xa4, 0xfe, 0x3c, 0xda, 0x79, 0x05, 0x5e, 0x91, 0xf0, 0x6b, 0x05, 0x2c, 0x39, 0x89, 0x33, 0x46, - 0xde, 0xea, 0x46, 0x6a, 0xe8, 0xe4, 0x31, 0x85, 0xf0, 0x2e, 0xe6, 0xb3, 0x05, 0xeb, 0xaa, 0x3c, - 0xd3, 0xd2, 0x00, 0xe3, 0x01, 0x67, 0x81, 0x77, 0x01, 0x6c, 0x18, 0x2e, 0xcf, 0x69, 0x75, 0x93, - 0x62, 0x13, 0x57, 0x38, 0xaa, 0x1c, 0x4c, 0x61, 0x7d, 0xac, 0xf7, 0x58, 0xa0, 0x3e, 0x5e, 0xf0, - 0x0b, 0x05, 0xcc, 0x57, 0x7a, 0x07, 0x8d, 0xac, 0xcc, 0x2b, 0x43, 0xa5, 0xba, 0xcf, 0xa0, 0xd2, - 0x17, 0xdb, 0xad, 0xd2, 0x7c, 0x1f, 0x05, 0xea, 0x17, 0x0d, 0x7e, 0x02, 0x72, 0xd4, 0xab, 0x63, - 0x56, 0xc8, 0x8a, 0x17, 0x4e, 0x0f, 0xbb, 0x69, 0xd7, 0x89, 0x79, 0x80, 0xb8, 0xcf, 0xff, 0x89, - 0x5b, 0xdb, 0xf6, 0xc4, 0xc4, 0x62, 0xd1, 0x73, 0x0b, 0x15, 0xf2, 0x51, 0xd5, 0x17, 0x60, 0xae, - 0x7b, 0x70, 0xc0, 0x1a, 0x00, 0x66, 0xd0, 0xab, 0x7c, 0x4d, 0xf0, 0xb8, 0x17, 0xdf, 0xa3, 0xb2, - 0xc2, 0x46, 0x8f, 0xc6, 0x66, 0x28, 0x62, 0xa8, 0x03, 
0x5b, 0x3d, 0x0f, 0xa6, 0x6e, 0x51, 0xdb, - 0x73, 0xe4, 0x21, 0xe1, 0x32, 0xc8, 0x5a, 0x46, 0x23, 0x18, 0x41, 0xe1, 0x5c, 0xdc, 0x30, 0x1a, - 0x18, 0x09, 0x8d, 0xfa, 0x95, 0x02, 0xa6, 0xd7, 0x48, 0x83, 0xb8, 0x08, 0x33, 0xc7, 0xb6, 0x18, - 0x86, 0x97, 0x62, 0x63, 0xeb, 0x64, 0xd7, 0xd8, 0x3a, 0x16, 0x33, 0xee, 0x18, 0x58, 0x4f, 0xc0, - 0xf8, 0xa7, 0x1e, 0xf6, 0x88, 0x55, 0x95, 0x63, 0xfb, 0x52, 0xea, 0x0d, 0xb7, 0x7c, 0xfb, 0x58, - 0xc5, 0xe9, 0x93, 0x7c, 0x10, 0x48, 0x0d, 0x0a, 0x20, 0xd5, 0xdf, 0x32, 0xe0, 0xa4, 0x88, 0x8c, - 0x2b, 0x03, 0xb6, 0xf3, 0x13, 0x50, 0x30, 0x18, 0xf3, 0x28, 0xae, 0x24, 0x6d, 0xe7, 0x65, 0x79, - 0x9d, 0xc2, 0x6a, 0x82, 0x1d, 0x4a, 0x44, 0x80, 0x7b, 0x60, 0xba, 0xde, 0x79, 0x79, 0x79, 0x4f, - 0x2d, 0xf5, 0x9e, 0xb1, 0x94, 0xe9, 0x0b, 0xf2, 0x08, 0xf1, 0xb4, 0xa3, 0x38, 0x76, 0x3f, 0x3a, - 0x30, 0x3a, 0x3c, 0x1d, 0x80, 0xf7, 0xc1, 0xc2, 0x8e, 0x4d, 0xa9, 0xbd, 0x4f, 0xac, 0xaa, 0x88, - 0x13, 0x80, 0x64, 0x05, 0xc8, 0x5f, 0xda, 0xad, 0xd2, 0x82, 0xde, 0xcf, 0x00, 0xf5, 0xf7, 0x53, - 0xf7, 0xc1, 0xc2, 0x06, 0x1f, 0x2c, 0xcc, 0xf6, 0xa8, 0x89, 0xa3, 0x9e, 0x80, 0x25, 0x90, 0x6b, - 0x62, 0xba, 0xe3, 0xd7, 0x75, 0x5e, 0xcf, 0xf3, 0x8e, 0x78, 0xc4, 0x05, 0xc8, 0x97, 0xf3, 0x9b, - 0x58, 0x91, 0xe7, 0x43, 0xb4, 0xc6, 0x0a, 0x63, 0xc2, 0x54, 0xdc, 0x64, 0x23, 0xae, 0x42, 0xdd, - 0xb6, 0xea, 0x61, 0x06, 0x2c, 0x26, 0xb4, 0x20, 0x7c, 0x04, 0x26, 0x98, 0xfc, 0x5b, 0xb6, 0xd5, - 0xe9, 0xd4, 0xc7, 0x90, 0xce, 0xd1, 0x16, 0x08, 0xd0, 0x50, 0x88, 0x05, 0x1d, 0x30, 0x4d, 0xe5, - 0x19, 0x44, 0x50, 0xb9, 0x0d, 0xfe, 0x95, 0x0a, 0xde, 0x9b, 0x9f, 0xe8, 0xb9, 0x51, 0x27, 0x22, - 0x8a, 0x07, 0x80, 0x2f, 0xc0, 0x5c, 0xc7, 0xc5, 0xfd, 0xa0, 0xa3, 0x22, 0xe8, 0xe5, 0xd4, 0xa0, - 0x7d, 0xdf, 0x45, 0x2f, 0xc8, 0xb8, 0x73, 0x1b, 0x5d, 0xb8, 0xa8, 0x27, 0x92, 0xfa, 0x63, 0x06, - 0x0c, 0x58, 0x10, 0x1f, 0x80, 0xf0, 0x19, 0x31, 0xc2, 0x77, 0xed, 0x08, 0xab, 0x2f, 0x91, 0x00, - 0x92, 0x2e, 0x02, 0xb8, 0x7a, 0x94, 0x20, 0x83, 0x09, 0xe1, 0x2f, 0x19, 0xf0, 0xb7, 0x64, 0xe7, - 0x88, 0x20, 0xde, 0x8b, 0x4d, 0xda, 0x7f, 0x77, 0x4d, 0xda, 0x53, 0x43, 0x40, 0xfc, 0x49, 0x18, - 0xbb, 0x08, 0xe3, 0x5b, 0x05, 0x14, 0x93, 0xf3, 0xf6, 0x01, 0x08, 0xe4, 0xd3, 0x38, 0x81, 0xbc, - 0x7a, 0x84, 0x2a, 0x4b, 0x20, 0x94, 0xb7, 0x06, 0x15, 0x57, 0xc8, 0xfc, 0x86, 0x58, 0xfd, 0xdf, - 0x64, 0x06, 0xe5, 0x4a, 0x30, 0xd5, 0x94, 0x9f, 0x30, 0x31, 0xef, 0x9b, 0x16, 0x5f, 0x40, 0x0d, - 0xbe, 0x43, 0xfc, 0x8a, 0x24, 0x60, 0xbc, 0xee, 0xaf, 0x6c, 0xd9, 0xd7, 0xfa, 0x70, 0x9b, 0x72, - 0xd0, 0x8a, 0xf7, 0xe9, 0x81, 0x34, 0x43, 0x01, 0x3e, 0xc4, 0x60, 0x0c, 0x8b, 0x9f, 0xee, 0x43, - 0x37, 0x77, 0xda, 0x2f, 0x7d, 0x1d, 0xf0, 0x42, 0xf4, 0xad, 0x90, 0x04, 0x57, 0x5f, 0x2a, 0x60, - 0x39, 0x6d, 0x2a, 0xc0, 0xe7, 0x7d, 0xd8, 0xde, 0x51, 0xc8, 0xfc, 0xf0, 0xec, 0xef, 0x5b, 0x05, - 0x1c, 0xef, 0xc7, 0xa9, 0x78, 0xa3, 0x71, 0x22, 0x15, 0xb2, 0xa0, 0xb0, 0xd1, 0xb6, 0x84, 0x14, - 0x49, 0x2d, 0x3c, 0x0b, 0x26, 0x6a, 0x86, 0x55, 0xd9, 0x26, 0x9f, 0x05, 0x1c, 0x3f, 0x2c, 0xf5, - 0xdb, 0x52, 0x8e, 0x42, 0x0b, 0x78, 0x03, 0xcc, 0x09, 0xbf, 0x35, 0x6c, 0x55, 0xdd, 0x9a, 0x78, - 0x13, 0xc9, 0x51, 0xc2, 0xdd, 0xb3, 0xd5, 0xa5, 0x47, 0x3d, 0x1e, 0xea, 0xaf, 0x0a, 0x80, 0x7f, - 0x84, 0x56, 0x9c, 0x01, 0x79, 0xc3, 0x21, 0x82, 0xed, 0xfa, 0xcd, 0x96, 0xd7, 0xa7, 0xdb, 0xad, - 0x52, 0x7e, 0x75, 0xf3, 0x8e, 0x2f, 0x44, 0x91, 0x9e, 0x1b, 0x07, 0xfb, 0xd6, 0xdf, 0xab, 0xd2, - 0x38, 0x08, 0xcc, 0x50, 0xa4, 0x87, 0x57, 0xc0, 0x94, 0x59, 0xf7, 0x98, 0x8b, 0xe9, 0xb6, 0x69, - 0x3b, 0x58, 0x0c, 0xa7, 0x09, 0xfd, 0xb8, 0xbc, 0xd3, 0xd4, 0xf5, 0x0e, 0x1d, 
0x8a, 0x59, 0x42, - 0x0d, 0x00, 0xde, 0x59, 0xcc, 0x31, 0x78, 0x9c, 0x9c, 0x88, 0x33, 0xc3, 0x1f, 0x6c, 0x23, 0x94, - 0xa2, 0x0e, 0x0b, 0xf5, 0x19, 0x58, 0xd8, 0xc6, 0xb4, 0x49, 0x4c, 0xbc, 0x6a, 0x9a, 0xb6, 0x67, - 0xb9, 0x01, 0x6f, 0x2f, 0x83, 0x7c, 0x68, 0x26, 0x9b, 0xef, 0x98, 0x8c, 0x9f, 0x0f, 0xb1, 0x50, - 0x64, 0x13, 0x76, 0x7b, 0x26, 0xb1, 0xdb, 0x7f, 0xc8, 0x80, 0xf1, 0x08, 0x3e, 0xbb, 0x47, 0xac, - 0x8a, 0x44, 0x3e, 0x11, 0x58, 0xdf, 0x23, 0x56, 0xe5, 0x5d, 0xab, 0x34, 0x29, 0xcd, 0xf8, 0x27, - 0x12, 0x86, 0xf0, 0x2e, 0xc8, 0x7a, 0x0c, 0x53, 0xd9, 0xc7, 0x67, 0x53, 0xab, 0xf9, 0x21, 0xc3, - 0x34, 0x20, 0x5a, 0x13, 0x1c, 0x9a, 0x0b, 0x90, 0xc0, 0x80, 0x1b, 0x20, 0x57, 0xe5, 0xaf, 0x22, - 0x5b, 0xf5, 0x5c, 0x2a, 0x58, 0xe7, 0x2f, 0x1a, 0xbf, 0x10, 0x84, 0x04, 0xf9, 0x30, 0x90, 0x82, - 0x19, 0x16, 0x4b, 0xa2, 0x78, 0xb0, 0x61, 0x88, 0x53, 0xdf, 0xdc, 0xeb, 0xb0, 0xdd, 0x2a, 0xcd, - 0xc4, 0x55, 0xa8, 0x2b, 0x82, 0x5a, 0x06, 0x93, 0x1d, 0x57, 0x4c, 0x9f, 0xb5, 0xfa, 0xcd, 0x57, - 0x87, 0xc5, 0x91, 0xd7, 0x87, 0xc5, 0x91, 0x37, 0x87, 0xc5, 0x91, 0xcf, 0xdb, 0x45, 0xe5, 0x55, - 0xbb, 0xa8, 0xbc, 0x6e, 0x17, 0x95, 0x37, 0xed, 0xa2, 0xf2, 0xb6, 0x5d, 0x54, 0xbe, 0xfc, 0xa9, - 0x38, 0xf2, 0xb8, 0x94, 0xf2, 0x2f, 0xda, 0xdf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xc1, 0x6c, 0x4e, - 0x4e, 0xdd, 0x15, 0x00, 0x00, + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1/generated.proto", fileDescriptor_f8a25df358697d27) +} + +var fileDescriptor_f8a25df358697d27 = []byte{ + // 1588 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x4b, 0x73, 0x1b, 0xc5, + 0x16, 0xb6, 0x64, 0xc9, 0xb6, 0x8e, 0x9f, 0x69, 0xc7, 0x65, 0xc5, 0xb9, 0x25, 0x39, 0x73, 0xeb, + 0xe6, 0x71, 0x43, 0xa4, 0xc4, 0x45, 0x20, 0xa9, 0x00, 0xa9, 0x4c, 0x12, 0xf2, 0xb2, 0x1d, 0xa7, + 0x95, 0x07, 0x15, 0xa8, 0x82, 0xd1, 0xa8, 0x2d, 0x4d, 0x2c, 0xcd, 0x0c, 0xdd, 0x33, 0x32, 0xa6, + 0x8a, 0x2a, 0x7e, 0x42, 0x56, 0x2c, 0x59, 0xc0, 0x3f, 0x60, 0x45, 0xc1, 0x86, 0x65, 0x76, 0x64, + 0x19, 0x58, 0xa8, 0x88, 0xf8, 0x0b, 0x2c, 0x20, 0x2b, 0xaa, 0x7b, 0x7a, 0x66, 0x34, 0x92, 0x66, + 0xac, 0xf2, 0x22, 0x6c, 0xd8, 0x79, 0xce, 0xf9, 0xce, 0x77, 0xba, 0x4f, 0x9f, 0x97, 0x0c, 0xea, + 0xce, 0x05, 0x56, 0x32, 0xac, 0xf2, 0x8e, 0x5b, 0x25, 0xd4, 0x24, 0x0e, 0x61, 0xe5, 0x36, 0x31, + 0x6b, 0x16, 0x2d, 0x4b, 0x85, 0x66, 0x1b, 0xe5, 0xed, 0xa6, 0xb5, 0xab, 0x5b, 0xa6, 0x43, 0xad, + 0x66, 0xb9, 0x7d, 0xae, 0x5c, 0x27, 0x26, 0xa1, 0x9a, 0x43, 0x6a, 0x25, 0x9b, 0x5a, 0x8e, 0x85, + 0x8e, 0x78, 0xd0, 0x92, 0x66, 0x1b, 0xa5, 0x1e, 0x68, 0xa9, 0x7d, 0x6e, 0xe5, 0x4c, 0xdd, 0x70, + 0x1a, 0x6e, 0xb5, 0xa4, 0x5b, 0xad, 0x72, 0xdd, 0xaa, 0x5b, 0x65, 0x61, 0x51, 0x75, 0xb7, 0xc5, + 0x97, 0xf8, 0x10, 0x7f, 0x79, 0x4c, 0x2b, 0x6f, 0x86, 0x4e, 0x5b, 0x9a, 0xde, 0x30, 0x4c, 0x42, + 0xf7, 0xca, 0xf6, 0x4e, 0x9d, 0x0b, 0x58, 0xb9, 0x45, 0x1c, 0x6d, 0x88, 0xff, 0x95, 0x72, 0x9c, + 0x15, 0x75, 0x4d, 0xc7, 0x68, 0x91, 0x01, 0x83, 0xb7, 0xf6, 0x33, 0x60, 0x7a, 0x83, 0xb4, 0xb4, + 0x7e, 0x3b, 0xe5, 0xc7, 0x14, 0xac, 0x5e, 0xff, 0x8c, 0xb4, 0x6c, 0x67, 0x8b, 0x1a, 0x16, 0x35, + 0x9c, 0xbd, 0x75, 0xd2, 0x26, 0xcd, 0xab, 0x96, 0xb9, 0x6d, 0xd4, 0x5d, 0xaa, 0x39, 0x86, 0x65, + 0xa2, 0x0f, 0x20, 0x6f, 0x5a, 0x2d, 0xc3, 0xd4, 0xb8, 0x5c, 0x77, 0x29, 0x25, 0xa6, 0xbe, 0x57, + 0x69, 0x68, 0x94, 0xb0, 0x7c, 0x6a, 0x35, 0x75, 0x32, 0xab, 0xfe, 0xa7, 0xdb, 0x29, 0xe6, 0x37, + 0x63, 0x30, 0x38, 0xd6, 0x1a, 0xbd, 0x0b, 0xf3, 0x4d, 0x62, 0xd6, 0xb4, 0x6a, 0x93, 0x6c, 0x11, + 0xaa, 0x13, 0xd3, 0xc9, 0xa7, 0x05, 0xe1, 
0x62, 0xb7, 0x53, 0x9c, 0x5f, 0x8f, 0xaa, 0x70, 0x3f, + 0x56, 0x79, 0x0c, 0xcb, 0xef, 0x37, 0xad, 0xdd, 0x6b, 0x06, 0x73, 0x0c, 0xb3, 0xee, 0x1a, 0xac, + 0x41, 0xe8, 0x06, 0x71, 0x1a, 0x56, 0x0d, 0x5d, 0x86, 0x8c, 0xb3, 0x67, 0x13, 0x71, 0xbe, 0x9c, + 0x7a, 0xfa, 0x59, 0xa7, 0x38, 0xd6, 0xed, 0x14, 0x33, 0xf7, 0xf7, 0x6c, 0xf2, 0xaa, 0x53, 0x3c, + 0x1a, 0x63, 0xc6, 0xd5, 0x58, 0x18, 0x2a, 0x4f, 0xd3, 0x00, 0x1c, 0x55, 0x11, 0x81, 0x43, 0x9f, + 0xc0, 0x14, 0x7f, 0xac, 0x9a, 0xe6, 0x68, 0x82, 0x73, 0x7a, 0xed, 0x6c, 0x29, 0x4c, 0x92, 0x20, + 0xe6, 0x25, 0x7b, 0xa7, 0xce, 0x05, 0xac, 0xc4, 0xd1, 0xa5, 0xf6, 0xb9, 0xd2, 0xdd, 0xea, 0x13, + 0xa2, 0x3b, 0x1b, 0xc4, 0xd1, 0x54, 0x24, 0x4f, 0x01, 0xa1, 0x0c, 0x07, 0xac, 0xe8, 0x0e, 0x64, + 0x98, 0x4d, 0x74, 0x11, 0x80, 0xe9, 0xb5, 0x53, 0xa5, 0xd8, 0x14, 0x2c, 0x85, 0xc7, 0xaa, 0xd8, + 0x44, 0x57, 0x67, 0xfc, 0xcb, 0xf1, 0x2f, 0x2c, 0x48, 0x50, 0x05, 0x26, 0x98, 0xa3, 0x39, 0x2e, + 0xcb, 0x8f, 0x0b, 0xba, 0xd3, 0xa3, 0xd1, 0x09, 0x13, 0x75, 0x4e, 0x12, 0x4e, 0x78, 0xdf, 0x58, + 0x52, 0x29, 0x2f, 0xd2, 0xb0, 0x18, 0x82, 0xaf, 0x5a, 0x66, 0xcd, 0x10, 0xf9, 0x71, 0x29, 0x12, + 0xeb, 0x13, 0x7d, 0xb1, 0x5e, 0x1e, 0x62, 0x12, 0xc6, 0x19, 0x5d, 0x0c, 0x4e, 0x9a, 0x16, 0xe6, + 0xc7, 0xa2, 0xce, 0x5f, 0x75, 0x8a, 0xf3, 0x81, 0x59, 0xf4, 0x3c, 0xa8, 0x0d, 0xa8, 0xa9, 0x31, + 0xe7, 0x3e, 0xd5, 0x4c, 0xe6, 0xd1, 0x1a, 0x2d, 0x22, 0x2f, 0xfc, 0xff, 0xd1, 0x5e, 0x87, 0x5b, + 0xa8, 0x2b, 0xd2, 0x25, 0x5a, 0x1f, 0x60, 0xc3, 0x43, 0x3c, 0xa0, 0xe3, 0x30, 0x41, 0x89, 0xc6, + 0x2c, 0x33, 0x9f, 0x11, 0x47, 0x0e, 0xe2, 0x85, 0x85, 0x14, 0x4b, 0x2d, 0x3a, 0x05, 0x93, 0x2d, + 0xc2, 0x98, 0x56, 0x27, 0xf9, 0xac, 0x00, 0xce, 0x4b, 0xe0, 0xe4, 0x86, 0x27, 0xc6, 0xbe, 0x5e, + 0xf9, 0x21, 0x05, 0x73, 0x61, 0x9c, 0xd6, 0x0d, 0xe6, 0xa0, 0x8f, 0x06, 0x32, 0xae, 0x34, 0xda, + 0x9d, 0xb8, 0xb5, 0xc8, 0xb7, 0x05, 0xe9, 0x6e, 0xca, 0x97, 0xf4, 0x64, 0xdb, 0x6d, 0xc8, 0x1a, + 0x0e, 0x69, 0xf1, 0xa8, 0x8f, 0x9f, 0x9c, 0x5e, 0xfb, 0xdf, 0x48, 0xf9, 0xa1, 0xce, 0x4a, 0xc6, + 0xec, 0x2d, 0x6e, 0x8b, 0x3d, 0x0a, 0xe5, 0x97, 0xf1, 0xde, 0xc3, 0xf3, 0x2c, 0x44, 0xdf, 0xa4, + 0x60, 0xc5, 0x8e, 0xed, 0x28, 0xf2, 0x3e, 0xef, 0x25, 0x38, 0x8d, 0x6f, 0x47, 0x98, 0x6c, 0x13, + 0xde, 0x43, 0x88, 0xaa, 0xc8, 0xd3, 0xac, 0x24, 0x80, 0x13, 0x4e, 0x81, 0x6e, 0x03, 0x6a, 0x69, + 0x0e, 0x8f, 0x63, 0x7d, 0x8b, 0x12, 0x9d, 0xd4, 0x38, 0xab, 0x6c, 0x40, 0x41, 0x4e, 0x6c, 0x0c, + 0x20, 0xf0, 0x10, 0x2b, 0xf4, 0x05, 0x2c, 0xd6, 0x06, 0xfb, 0x89, 0x4c, 0xc6, 0xb5, 0x7d, 0xa2, + 0x3b, 0xa4, 0x13, 0xa9, 0xcb, 0xdd, 0x4e, 0x71, 0x71, 0x88, 0x02, 0x0f, 0xf3, 0x83, 0x1e, 0x41, + 0x96, 0xba, 0x4d, 0xc2, 0xf2, 0x19, 0xf1, 0x9c, 0x49, 0x0e, 0xb7, 0xac, 0xa6, 0xa1, 0xef, 0x61, + 0x8e, 0x7e, 0x64, 0x38, 0x8d, 0x8a, 0x2b, 0x9a, 0x11, 0x0b, 0xdf, 0x56, 0xa8, 0xb0, 0xc7, 0xa7, + 0xb4, 0x61, 0xa1, 0xbf, 0x3f, 0xa0, 0x2a, 0x80, 0xee, 0x97, 0x24, 0x9f, 0x00, 0xe3, 0x7d, 0xb9, + 0x19, 0x9f, 0x40, 0x41, 0x25, 0x87, 0xbd, 0x30, 0x10, 0x31, 0xdc, 0xc3, 0xaa, 0x9c, 0x85, 0x99, + 0x1b, 0xd4, 0x72, 0x6d, 0x79, 0x3c, 0xb4, 0x0a, 0x19, 0x53, 0x6b, 0xf9, 0x3d, 0x26, 0x68, 0x79, + 0x9b, 0x5a, 0x8b, 0x60, 0xa1, 0x51, 0xbe, 0x4e, 0xc1, 0xec, 0xba, 0xd1, 0x32, 0x1c, 0x4c, 0x98, + 0x6d, 0x99, 0x8c, 0xa0, 0xf3, 0x91, 0xbe, 0x74, 0xac, 0xaf, 0x2f, 0x1d, 0x8a, 0x80, 0x7b, 0x3a, + 0xd2, 0x43, 0x98, 0xfc, 0xd4, 0x25, 0xae, 0x61, 0xd6, 0x65, 0x2f, 0x2e, 0x27, 0xdc, 0xed, 0x9e, + 0x87, 0x8c, 0x24, 0x96, 0x3a, 0xcd, 0x6b, 0x5c, 0x6a, 0xb0, 0x4f, 0xa6, 0xfc, 0x91, 0x86, 0x63, + 0xc2, 0x27, 0xa9, 0xfd, 0x23, 0xc3, 0x96, 0xc0, 0x6c, 0xb3, 0xf7, 
0xca, 0xf2, 0x76, 0x27, 0x13, + 0x6e, 0x17, 0x09, 0x91, 0xba, 0x24, 0x23, 0x18, 0x0d, 0x33, 0x8e, 0xb2, 0x0e, 0x9b, 0xe9, 0xe3, + 0xa3, 0xcf, 0x74, 0x74, 0x17, 0x96, 0xaa, 0x16, 0xa5, 0xd6, 0xae, 0x61, 0xd6, 0x85, 0x1f, 0x9f, + 0x24, 0x23, 0x48, 0x8e, 0x74, 0x3b, 0xc5, 0x25, 0x75, 0x18, 0x00, 0x0f, 0xb7, 0x53, 0x76, 0x61, + 0x69, 0x93, 0x77, 0x0d, 0x66, 0xb9, 0x54, 0x27, 0x61, 0xf6, 0xa3, 0x22, 0x64, 0xdb, 0x84, 0x56, + 0xbd, 0x0c, 0xce, 0xa9, 0x39, 0x9e, 0xfb, 0x0f, 0xb9, 0x00, 0x7b, 0x72, 0x7e, 0x13, 0x33, 0xb4, + 0x7c, 0x80, 0xd7, 0x59, 0x7e, 0x42, 0x40, 0xc5, 0x4d, 0x36, 0xa3, 0x2a, 0xdc, 0x8f, 0x55, 0x7e, + 0x4e, 0xc3, 0x72, 0x4c, 0xb1, 0xa1, 0x2d, 0x98, 0x62, 0xf2, 0x6f, 0x59, 0x40, 0x4a, 0xc2, 0x33, + 0x48, 0xb3, 0xb0, 0xa1, 0xfb, 0x3c, 0x38, 0x60, 0x41, 0x4f, 0x60, 0x96, 0x4a, 0xef, 0xc2, 0x9d, + 0x6c, 0xec, 0x67, 0x12, 0x68, 0x07, 0x63, 0x12, 0x3e, 0x31, 0xee, 0xe5, 0xc2, 0x51, 0x6a, 0xd4, + 0x86, 0x85, 0x9e, 0xcb, 0x7a, 0xee, 0xc6, 0x85, 0xbb, 0xb3, 0x09, 0xee, 0x86, 0xbe, 0x82, 0x9a, + 0x97, 0x1e, 0x17, 0x36, 0xfb, 0x18, 0xf1, 0x80, 0x0f, 0xe5, 0xa7, 0x34, 0x24, 0xf4, 0xfa, 0xd7, + 0xb0, 0xa3, 0x7d, 0x18, 0xd9, 0xd1, 0x2e, 0x1e, 0x68, 0x7e, 0xc5, 0xee, 0x6c, 0x7a, 0xdf, 0xce, + 0x76, 0xe9, 0x60, 0xf4, 0xc9, 0x3b, 0xdc, 0x9f, 0x69, 0xf8, 0x6f, 0xbc, 0x71, 0xb8, 0xd3, 0xdd, + 0x89, 0xf4, 0xce, 0xb7, 0xfb, 0x7a, 0xe7, 0x89, 0x11, 0x28, 0xfe, 0xdd, 0xf1, 0xfa, 0x76, 0xbc, + 0x5f, 0x53, 0x50, 0x88, 0x8f, 0xdb, 0x6b, 0xd8, 0xf9, 0x1e, 0x47, 0x77, 0xbe, 0xf3, 0x07, 0xca, + 0xaf, 0x98, 0x1d, 0xf0, 0x46, 0x52, 0x5a, 0x05, 0x2b, 0xdb, 0x08, 0x63, 0xfc, 0xdb, 0x74, 0x52, + 0x94, 0xc4, 0x72, 0xb9, 0xcf, 0xef, 0x8d, 0x88, 0xf5, 0x75, 0x93, 0x0f, 0x97, 0x16, 0x9f, 0x0f, + 0x5e, 0x2e, 0xea, 0x30, 0xd9, 0xf4, 0x86, 0xb0, 0xac, 0xe2, 0x77, 0xf6, 0x9b, 0x7f, 0x49, 0xe3, + 0xda, 0x1b, 0xf5, 0x12, 0x86, 0x7d, 0x66, 0xf4, 0x31, 0x4c, 0x10, 0xf1, 0xab, 0x7a, 0x84, 0x52, + 0xde, 0xef, 0xe7, 0xb7, 0x0a, 0x3c, 0xed, 0x3c, 0x14, 0x96, 0xb4, 0xca, 0x57, 0x29, 0x58, 0xdd, + 0xaf, 0x07, 0x20, 0x3a, 0x64, 0x4f, 0x3b, 0xd8, 0xce, 0x3d, 0xfa, 0xde, 0xf6, 0x5d, 0x0a, 0x0e, + 0x0f, 0xdb, 0x89, 0x78, 0x41, 0xf1, 0x45, 0x28, 0xd8, 0x62, 0x82, 0x82, 0xba, 0x27, 0xa4, 0x58, + 0x6a, 0xd1, 0x1b, 0x30, 0xd5, 0xd0, 0xcc, 0x5a, 0xc5, 0xf8, 0xdc, 0x5f, 0xc5, 0x83, 0x94, 0xbe, + 0x29, 0xe5, 0x38, 0x40, 0xa0, 0x6b, 0xb0, 0x20, 0xec, 0xd6, 0x89, 0x59, 0x77, 0x1a, 0xe2, 0x1d, + 0xe4, 0xb6, 0x11, 0xcc, 0x95, 0x7b, 0x7d, 0x7a, 0x3c, 0x60, 0xa1, 0xfc, 0x95, 0x02, 0x74, 0x90, + 0x05, 0xe1, 0x34, 0xe4, 0x34, 0xdb, 0x10, 0x7b, 0xaa, 0x57, 0x54, 0x39, 0x75, 0xb6, 0xdb, 0x29, + 0xe6, 0xae, 0x6c, 0xdd, 0xf2, 0x84, 0x38, 0xd4, 0x73, 0xb0, 0x3f, 0x45, 0xbd, 0x69, 0x29, 0xc1, + 0xbe, 0x63, 0x86, 0x43, 0x3d, 0xba, 0x00, 0x33, 0x7a, 0xd3, 0x65, 0x0e, 0xa1, 0x15, 0xdd, 0xb2, + 0x89, 0x68, 0x42, 0x53, 0xea, 0x61, 0x79, 0xa7, 0x99, 0xab, 0x3d, 0x3a, 0x1c, 0x41, 0xa2, 0x12, + 0x00, 0xaf, 0x23, 0x66, 0x6b, 0xdc, 0x4f, 0x56, 0xf8, 0x99, 0xe3, 0x0f, 0xb6, 0x19, 0x48, 0x71, + 0x0f, 0x42, 0x79, 0x02, 0x4b, 0x15, 0x42, 0xdb, 0x86, 0x4e, 0xae, 0xe8, 0xba, 0xe5, 0x9a, 0x8e, + 0xbf, 0x71, 0x97, 0x21, 0x17, 0xc0, 0x64, 0xa9, 0x1d, 0x92, 0xfe, 0x73, 0x01, 0x17, 0x0e, 0x31, + 0x41, 0x6d, 0xa7, 0x63, 0x6b, 0xfb, 0xfb, 0x34, 0x4c, 0x86, 0xf4, 0x99, 0x1d, 0xc3, 0xac, 0x49, + 0xe6, 0xa3, 0x3e, 0xfa, 0x8e, 0x61, 0xd6, 0x5e, 0x75, 0x8a, 0xd3, 0x12, 0xc6, 0x3f, 0xb1, 0x00, + 0xa2, 0x6b, 0x90, 0x71, 0x19, 0xa1, 0xb2, 0x6a, 0x8f, 0x27, 0xe4, 0xf1, 0x03, 0x46, 0xa8, 0xbf, + 0x32, 0x4d, 0x71, 0x52, 0x2e, 0xc0, 0xc2, 0x1a, 0xdd, 0x84, 0x6c, 0x9d, 0xbf, 0x87, 0x2c, 
0xcc, + 0x13, 0x09, 0x34, 0xbd, 0xbf, 0x3f, 0xbc, 0xc7, 0x17, 0x12, 0xec, 0x11, 0xa0, 0x26, 0xcc, 0xb1, + 0x48, 0xe0, 0xc4, 0x23, 0x25, 0xaf, 0x40, 0x43, 0x23, 0xad, 0xa2, 0x6e, 0xa7, 0x38, 0x17, 0x55, + 0xe1, 0x3e, 0x6e, 0xa5, 0x0c, 0xd3, 0x3d, 0xd7, 0xda, 0xbf, 0x8f, 0xaa, 0x97, 0x9f, 0xbd, 0x2c, + 0x8c, 0x3d, 0x7f, 0x59, 0x18, 0x7b, 0xf1, 0xb2, 0x30, 0xf6, 0x65, 0xb7, 0x90, 0x7a, 0xd6, 0x2d, + 0xa4, 0x9e, 0x77, 0x0b, 0xa9, 0x17, 0xdd, 0x42, 0xea, 0xb7, 0x6e, 0x21, 0xf5, 0xf4, 0xf7, 0xc2, + 0xd8, 0xe3, 0x23, 0xb1, 0xff, 0x13, 0xfd, 0x3b, 0x00, 0x00, 0xff, 0xff, 0xd1, 0x0a, 0x3e, 0x83, + 0x48, 0x15, 0x00, 0x00, } func (m *ExemptPriorityLevelConfiguration) Marshal() (dAtA []byte, err error) { @@ -1244,9 +1242,11 @@ func (m *LimitedPriorityLevelConfiguration) MarshalToSizedBuffer(dAtA []byte) (i } i-- dAtA[i] = 0x12 - i = encodeVarintGenerated(dAtA, i, uint64(m.AssuredConcurrencyShares)) - i-- - dAtA[i] = 0x8 + if m.NominalConcurrencyShares != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.NominalConcurrencyShares)) + i-- + dAtA[i] = 0x8 + } return len(dAtA) - i, nil } @@ -2007,7 +2007,9 @@ func (m *LimitedPriorityLevelConfiguration) Size() (n int) { } var l int _ = l - n += 1 + sovGenerated(uint64(m.AssuredConcurrencyShares)) + if m.NominalConcurrencyShares != nil { + n += 1 + sovGenerated(uint64(*m.NominalConcurrencyShares)) + } l = m.LimitResponse.Size() n += 1 + l + sovGenerated(uint64(l)) if m.LendablePercent != nil { @@ -2384,7 +2386,7 @@ func (this *LimitedPriorityLevelConfiguration) String() string { return "nil" } s := strings.Join([]string{`&LimitedPriorityLevelConfiguration{`, - `AssuredConcurrencyShares:` + fmt.Sprintf("%v", this.AssuredConcurrencyShares) + `,`, + `NominalConcurrencyShares:` + valueToStringGenerated(this.NominalConcurrencyShares) + `,`, `LimitResponse:` + strings.Replace(strings.Replace(this.LimitResponse.String(), "LimitResponse", "LimitResponse", 1), `&`, ``, 1) + `,`, `LendablePercent:` + valueToStringGenerated(this.LendablePercent) + `,`, `BorrowingLimitPercent:` + valueToStringGenerated(this.BorrowingLimitPercent) + `,`, @@ -3713,9 +3715,9 @@ func (m *LimitedPriorityLevelConfiguration) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AssuredConcurrencyShares", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NominalConcurrencyShares", wireType) } - m.AssuredConcurrencyShares = 0 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3725,11 +3727,12 @@ func (m *LimitedPriorityLevelConfiguration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AssuredConcurrencyShares |= int32(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } + m.NominalConcurrencyShares = &v case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field LimitResponse", wireType) diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto b/vendor/k8s.io/api/flowcontrol/v1/generated.proto similarity index 94% rename from vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto rename to vendor/k8s.io/api/flowcontrol/v1/generated.proto index 6509386f26..a5c6f4fc4f 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/flowcontrol/v1/generated.proto @@ -19,14 +19,14 @@ limitations under the License. 
syntax = "proto2"; -package k8s.io.api.flowcontrol.v1alpha1; +package k8s.io.api.flowcontrol.v1; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "k8s.io/api/flowcontrol/v1alpha1"; +option go_package = "k8s.io/api/flowcontrol/v1"; // ExemptPriorityLevelConfiguration describes the configurable aspects // of the handling of exempt requests. @@ -153,6 +153,8 @@ message FlowSchemaStatus { // `conditions` is a list of the current states of FlowSchema. // +listType=map // +listMapKey=type + // +patchMergeKey=type + // +patchStrategy=merge // +optional repeated FlowSchemaCondition conditions = 1; } @@ -190,23 +192,28 @@ message LimitResponse { // - How are requests for this priority level limited? // - What should be done with requests that exceed the limit? message LimitedPriorityLevelConfiguration { - // `assuredConcurrencyShares` (ACS) configures the execution - // limit, which is a limit on the number of requests of this - // priority level that may be exeucting at a given time. ACS must - // be a positive number. The server's concurrency limit (SCL) is - // divided among the concurrency-controlled priority levels in - // proportion to their assured concurrency shares. This produces - // the assured concurrency value (ACV) --- the number of requests - // that may be executing at a time --- for each such priority - // level: + // `nominalConcurrencyShares` (NCS) contributes to the computation of the + // NominalConcurrencyLimit (NominalCL) of this level. + // This is the number of execution seats available at this priority level. + // This is used both for requests dispatched from this priority level + // as well as requests dispatched from other priority levels + // borrowing seats from this level. + // The server's concurrency limit (ServerCL) is divided among the + // Limited priority levels in proportion to their NCS values: + // + // NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) + // sum_ncs = sum[priority level k] NCS(k) // - // ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) ) + // Bigger numbers mean a larger nominal concurrency limit, + // at the expense of every other priority level. + // + // If not specified, this field defaults to a value of 30. + // + // Setting this field to zero supports the construction of a + // "jail" for this priority level that is used to hold some request(s) // - // bigger numbers of ACS mean more reserved concurrent requests (at the - // expense of every other PL). - // This field has a default value of 30. // +optional - optional int32 assuredConcurrencyShares = 1; + optional int32 nominalConcurrencyShares = 1; // `limitResponse` indicates what to do with requests that can not be executed right now optional LimitResponse limitResponse = 2; @@ -381,6 +388,8 @@ message PriorityLevelConfigurationStatus { // `conditions` is the current state of "request-priority". 
// +listType=map // +listMapKey=type + // +patchMergeKey=type + // +patchStrategy=merge // +optional repeated PriorityLevelConfigurationCondition conditions = 1; } diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/register.go b/vendor/k8s.io/api/flowcontrol/v1/register.go similarity index 95% rename from vendor/k8s.io/api/flowcontrol/v1alpha1/register.go rename to vendor/k8s.io/api/flowcontrol/v1/register.go index 0c8a9cc565..02725b514e 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/register.go +++ b/vendor/k8s.io/api/flowcontrol/v1/register.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Kubernetes Authors. +Copyright 2023 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +package v1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -26,7 +26,7 @@ import ( const GroupName = "flowcontrol.apiserver.k8s.io" // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} // Kind takes an unqualified kind and returns a Group qualified GroupKind func Kind(kind string) schema.GroupKind { diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go b/vendor/k8s.io/api/flowcontrol/v1/types.go similarity index 88% rename from vendor/k8s.io/api/flowcontrol/v1alpha1/types.go rename to vendor/k8s.io/api/flowcontrol/v1/types.go index 161411ff33..e62d23280e 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go +++ b/vendor/k8s.io/api/flowcontrol/v1/types.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Kubernetes Authors. +Copyright 2023 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +package v1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -57,13 +57,55 @@ const ( ResponseHeaderMatchedFlowSchemaUID = "X-Kubernetes-PF-FlowSchema-UID" ) +const ( + // AutoUpdateAnnotationKey is the name of an annotation that enables + // automatic update of the spec of the bootstrap configuration + // object(s), if set to 'true'. + // + // On a fresh install, all bootstrap configuration objects will have auto + // update enabled with the following annotation key: + // apf.kubernetes.io/autoupdate-spec: 'true' + // + // The kube-apiserver periodically checks the bootstrap configuration + // objects on the cluster and applies updates if necessary. + // + // kube-apiserver enforces an 'always auto-update' policy for the + // mandatory configuration object(s). This implies: + // - the auto-update annotation key is added with a value of 'true' + // if it is missing. + // - the auto-update annotation key is set to 'true' if its current value + // is a boolean false or has an invalid boolean representation + // (if the cluster operator sets it to 'false' it will be stomped) + // - any changes to the spec made by the cluster operator will be + // stomped, except for changes to the `nominalConcurrencyShares` + // and `lendablePercent` fields of the PriorityLevelConfiguration + // named "exempt". 
+ // + // The kube-apiserver will apply updates on the suggested configuration if: + // - the cluster operator has enabled auto-update by setting the annotation + // (apf.kubernetes.io/autoupdate-spec: 'true') or + // - the annotation key is missing but the generation is 1 + // + // If the suggested configuration object is missing the annotation key, + // kube-apiserver will update the annotation appropriately: + // - it is set to 'true' if generation of the object is '1' which usually + // indicates that the spec of the object has not been changed. + // - it is set to 'false' if generation of the object is greater than 1. + // + // The goal is to enable the kube-apiserver to apply update on suggested + // configuration objects installed by previous releases but not overwrite + // changes made by the cluster operators. + // Note that this distinction is imperfectly detected: in the case where an + // operator deletes a suggested configuration object and later creates it + // but with a variant spec and then does no updates of the object + // (generation is 1), the technique outlined above will incorrectly + // determine that the object should be auto-updated. + AutoUpdateAnnotationKey = "apf.kubernetes.io/autoupdate-spec" +) + // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.18 -// +k8s:prerelease-lifecycle-gen:deprecated=1.20 -// +k8s:prerelease-lifecycle-gen:removed=1.21 -// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta3,FlowSchema // FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with // similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher". @@ -84,10 +126,6 @@ type FlowSchema struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.18 -// +k8s:prerelease-lifecycle-gen:deprecated=1.20 -// +k8s:prerelease-lifecycle-gen:removed=1.21 -// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta3,FlowSchemaList // FlowSchemaList is a list of FlowSchema objects. type FlowSchemaList struct { @@ -314,8 +352,10 @@ type FlowSchemaStatus struct { // `conditions` is a list of the current states of FlowSchema. // +listType=map // +listMapKey=type + // +patchMergeKey=type + // +patchStrategy=merge // +optional - Conditions []FlowSchemaCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` + Conditions []FlowSchemaCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` } // FlowSchemaCondition describes conditions for a FlowSchema. @@ -341,10 +381,6 @@ type FlowSchemaConditionType string // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.18 -// +k8s:prerelease-lifecycle-gen:deprecated=1.20 -// +k8s:prerelease-lifecycle-gen:removed=1.21 -// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta3,PriorityLevelConfiguration // PriorityLevelConfiguration represents the configuration of a priority level. 
type PriorityLevelConfiguration struct { @@ -364,10 +400,6 @@ type PriorityLevelConfiguration struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.18 -// +k8s:prerelease-lifecycle-gen:deprecated=1.20 -// +k8s:prerelease-lifecycle-gen:removed=1.21 -// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta3,PriorityLevelConfigurationList // PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects. type PriorityLevelConfigurationList struct { @@ -426,23 +458,28 @@ const ( // - How are requests for this priority level limited? // - What should be done with requests that exceed the limit? type LimitedPriorityLevelConfiguration struct { - // `assuredConcurrencyShares` (ACS) configures the execution - // limit, which is a limit on the number of requests of this - // priority level that may be exeucting at a given time. ACS must - // be a positive number. The server's concurrency limit (SCL) is - // divided among the concurrency-controlled priority levels in - // proportion to their assured concurrency shares. This produces - // the assured concurrency value (ACV) --- the number of requests - // that may be executing at a time --- for each such priority - // level: + // `nominalConcurrencyShares` (NCS) contributes to the computation of the + // NominalConcurrencyLimit (NominalCL) of this level. + // This is the number of execution seats available at this priority level. + // This is used both for requests dispatched from this priority level + // as well as requests dispatched from other priority levels + // borrowing seats from this level. + // The server's concurrency limit (ServerCL) is divided among the + // Limited priority levels in proportion to their NCS values: + // + // NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) + // sum_ncs = sum[priority level k] NCS(k) + // + // Bigger numbers mean a larger nominal concurrency limit, + // at the expense of every other priority level. + // + // If not specified, this field defaults to a value of 30. // - // ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) ) + // Setting this field to zero supports the construction of a + // "jail" for this priority level that is used to hold some request(s) // - // bigger numbers of ACS mean more reserved concurrent requests (at the - // expense of every other PL). - // This field has a default value of 30. // +optional - AssuredConcurrencyShares int32 `json:"assuredConcurrencyShares" protobuf:"varint,1,opt,name=assuredConcurrencyShares"` + NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares" protobuf:"varint,1,opt,name=nominalConcurrencyShares"` // `limitResponse` indicates what to do with requests that can not be executed right now LimitResponse LimitResponse `json:"limitResponse,omitempty" protobuf:"bytes,2,opt,name=limitResponse"` @@ -586,8 +623,10 @@ type PriorityLevelConfigurationStatus struct { // `conditions` is the current state of "request-priority". // +listType=map // +listMapKey=type + // +patchMergeKey=type + // +patchStrategy=merge // +optional - Conditions []PriorityLevelConfigurationCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` + Conditions []PriorityLevelConfigurationCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` } // PriorityLevelConfigurationCondition defines the condition of priority level. 
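Aside, not part of the vendored patch: the doc comments above quote three formulas for a Limited priority level — NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ), LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ), and BorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 ). Below is a minimal Go sketch of that arithmetic only; the server concurrency limit, level names, and share/percent values are assumed for illustration and are not defaults taken from Kubernetes (other than treating an unset v1 nominalConcurrencyShares as 30).

package main

import (
	"fmt"
	"math"
)

// limitedLevel carries just the fields needed for the concurrency arithmetic.
type limitedLevel struct {
	name                     string
	nominalConcurrencyShares int32  // in v1, a nil field is defaulted to 30 before this computation
	lendablePercent          int32
	borrowingLimitPercent    *int32 // nil means the borrowing limit is effectively infinite
}

func main() {
	serverCL := 600.0 // assumed server-wide concurrency limit (ServerCL)
	borrow := int32(50)
	levels := []limitedLevel{
		{name: "workload-low", nominalConcurrencyShares: 100, lendablePercent: 90},
		{name: "workload-high", nominalConcurrencyShares: 40, lendablePercent: 50, borrowingLimitPercent: &borrow},
		{name: "global-default", nominalConcurrencyShares: 20, lendablePercent: 50},
	}

	// sum_ncs = sum over all Limited priority levels of NCS(k)
	var sumNCS float64
	for _, l := range levels {
		sumNCS += float64(l.nominalConcurrencyShares)
	}

	for _, l := range levels {
		// NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs )
		nominalCL := math.Ceil(serverCL * float64(l.nominalConcurrencyShares) / sumNCS)
		// LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )
		lendableCL := math.Round(nominalCL * float64(l.lendablePercent) / 100.0)
		fmt.Printf("%s: NominalCL=%.0f LendableCL=%.0f", l.name, nominalCL, lendableCL)
		if l.borrowingLimitPercent != nil {
			// BorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )
			fmt.Printf(" BorrowingCL=%.0f", math.Round(nominalCL*float64(*l.borrowingLimitPercent)/100.0))
		}
		fmt.Println()
	}
}

The sketch also shows why the v1 field became a pointer (*int32) in the hunks above: with a pointer, "unset" can be distinguished from an explicit zero, letting the server apply the documented default of 30 while still allowing a zero-share "jail" level.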
diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1/types_swagger_doc_generated.go similarity index 95% rename from vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go rename to vendor/k8s.io/api/flowcontrol/v1/types_swagger_doc_generated.go index 1d0680c108..b8cb436367 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/flowcontrol/v1/types_swagger_doc_generated.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +package v1 // This file contains a collection of methods that can be used from go-restful to // generate Swagger API documentation for its models. Please read this PR for more @@ -122,7 +122,7 @@ func (LimitResponse) SwaggerDoc() map[string]string { var map_LimitedPriorityLevelConfiguration = map[string]string{ "": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n - How are requests for this priority level limited?\n - What should be done with requests that exceed the limit?", - "assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) ", + "nominalConcurrencyShares": "`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats available at this priority level. This is used both for requests dispatched from this priority level as well as requests dispatched from other priority levels borrowing seats from this level. The server's concurrency limit (ServerCL) is divided among the Limited priority levels in proportion to their NCS values:\n\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\n\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level.\n\nIf not specified, this field defaults to a value of 30.\n\nSetting this field to zero supports the construction of a \"jail\" for this priority level that is used to hold some request(s)", "limitResponse": "`limitResponse` indicates what to do with requests that can not be executed right now", "lendablePercent": "`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. The value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\n\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )", "borrowingLimitPercent": "`borrowingLimitPercent`, if present, configures a limit on how many seats this priority level can borrow from other priority levels. The limit is known as this level's BorrowingConcurrencyLimit (BorrowingCL) and is a limit on the total number of seats that this level may borrow at any one time. This field holds the ratio of that limit to the level's nominal concurrency limit. 
When this field is non-nil, it must hold a non-negative integer and the limit is calculated as follows.\n\nBorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )\n\nThe value of this field can be more than 100, implying that this priority level can borrow a number of seats that is greater than its own nominal concurrency limit (NominalCL). When this field is left `nil`, the limit is effectively infinite.", diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/flowcontrol/v1/zz_generated.deepcopy.go similarity index 99% rename from vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.deepcopy.go rename to vendor/k8s.io/api/flowcontrol/v1/zz_generated.deepcopy.go index a5c9737aa5..f37090b75b 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/flowcontrol/v1/zz_generated.deepcopy.go @@ -19,7 +19,7 @@ limitations under the License. // Code generated by deepcopy-gen. DO NOT EDIT. -package v1alpha1 +package v1 import ( runtime "k8s.io/apimachinery/pkg/runtime" @@ -237,6 +237,11 @@ func (in *LimitResponse) DeepCopy() *LimitResponse { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LimitedPriorityLevelConfiguration) DeepCopyInto(out *LimitedPriorityLevelConfiguration) { *out = *in + if in.NominalConcurrencyShares != nil { + in, out := &in.NominalConcurrencyShares, &out.NominalConcurrencyShares + *out = new(int32) + **out = **in + } in.LimitResponse.DeepCopyInto(&out.LimitResponse) if in.LendablePercent != nil { in, out := &in.LendablePercent, &out.LendablePercent diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.prerelease-lifecycle.go deleted file mode 100644 index 2b6a3d3fd9..0000000000 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.prerelease-lifecycle.go +++ /dev/null @@ -1,122 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - schema "k8s.io/apimachinery/pkg/runtime/schema" -) - -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *FlowSchema) APILifecycleIntroduced() (major, minor int) { - return 1, 18 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. 
-func (in *FlowSchema) APILifecycleDeprecated() (major, minor int) { - return 1, 20 -} - -// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. -// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=<group>,<version>,<kind>" tags in types.go. -func (in *FlowSchema) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta3", Kind: "FlowSchema"} -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *FlowSchema) APILifecycleRemoved() (major, minor int) { - return 1, 21 -} - -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *FlowSchemaList) APILifecycleIntroduced() (major, minor int) { - return 1, 18 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *FlowSchemaList) APILifecycleDeprecated() (major, minor int) { - return 1, 20 -} - -// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. -// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=<group>,<version>,<kind>" tags in types.go. -func (in *FlowSchemaList) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta3", Kind: "FlowSchemaList"} -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *FlowSchemaList) APILifecycleRemoved() (major, minor int) { - return 1, 21 -} - -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *PriorityLevelConfiguration) APILifecycleIntroduced() (major, minor int) { - return 1, 18 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *PriorityLevelConfiguration) APILifecycleDeprecated() (major, minor int) { - return 1, 20 -} - -// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. 
-// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=<group>,<version>,<kind>" tags in types.go. -func (in *PriorityLevelConfiguration) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta3", Kind: "PriorityLevelConfiguration"} -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *PriorityLevelConfiguration) APILifecycleRemoved() (major, minor int) { - return 1, 21 -} - -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *PriorityLevelConfigurationList) APILifecycleIntroduced() (major, minor int) { - return 1, 18 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *PriorityLevelConfigurationList) APILifecycleDeprecated() (major, minor int) { - return 1, 20 -} - -// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. -// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=<group>,<version>,<kind>" tags in types.go. -func (in *PriorityLevelConfigurationList) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta3", Kind: "PriorityLevelConfigurationList"} -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *PriorityLevelConfigurationList) APILifecycleRemoved() (major, minor int) { - return 1, 21 -} diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto b/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto index 96df0ace79..04b54820c7 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto @@ -192,7 +192,7 @@ message LimitResponse { message LimitedPriorityLevelConfiguration { // `assuredConcurrencyShares` (ACS) configures the execution // limit, which is a limit on the number of requests of this - // priority level that may be exeucting at a given time. ACS must + // priority level that may be executing at a given time. ACS must // be a positive number. The server's concurrency limit (SCL) is // divided among the concurrency-controlled priority levels in // proportion to their assured concurrency shares. 
This produces diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/types.go b/vendor/k8s.io/api/flowcontrol/v1beta1/types.go index 9e05ff1a09..abc3e42009 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta1/types.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/types.go @@ -466,7 +466,7 @@ const ( type LimitedPriorityLevelConfiguration struct { // `assuredConcurrencyShares` (ACS) configures the execution // limit, which is a limit on the number of requests of this - // priority level that may be exeucting at a given time. ACS must + // priority level that may be executing at a given time. ACS must // be a positive number. The server's concurrency limit (SCL) is // divided among the concurrency-controlled priority levels in // proportion to their assured concurrency shares. This produces diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go index 1405f3c3ca..d69bdac622 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go @@ -122,7 +122,7 @@ func (LimitResponse) SwaggerDoc() map[string]string { var map_LimitedPriorityLevelConfiguration = map[string]string{ "": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n - How are requests for this priority level limited?\n - What should be done with requests that exceed the limit?", - "assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) ", + "assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be executing at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) ", "limitResponse": "`limitResponse` indicates what to do with requests that can not be executed right now", "lendablePercent": "`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. The value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\n\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )", "borrowingLimitPercent": "`borrowingLimitPercent`, if present, configures a limit on how many seats this priority level can borrow from other priority levels. The limit is known as this level's BorrowingConcurrencyLimit (BorrowingCL) and is a limit on the total number of seats that this level may borrow at any one time. This field holds the ratio of that limit to the level's nominal concurrency limit. 
When this field is non-nil, it must hold a non-negative integer and the limit is calculated as follows.\n\nBorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )\n\nThe value of this field can be more than 100, implying that this priority level can borrow a number of seats that is greater than its own nominal concurrency limit (NominalCL). When this field is left `nil`, the limit is effectively infinite.", diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto b/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto index a8c8a32737..a832114afe 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto +++ b/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto @@ -192,7 +192,7 @@ message LimitResponse { message LimitedPriorityLevelConfiguration { // `assuredConcurrencyShares` (ACS) configures the execution // limit, which is a limit on the number of requests of this - // priority level that may be exeucting at a given time. ACS must + // priority level that may be executing at a given time. ACS must // be a positive number. The server's concurrency limit (SCL) is // divided among the concurrency-controlled priority levels in // proportion to their assured concurrency shares. This produces diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/types.go b/vendor/k8s.io/api/flowcontrol/v1beta2/types.go index e8cf7abfff..c66cb173f4 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta2/types.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta2/types.go @@ -466,7 +466,7 @@ const ( type LimitedPriorityLevelConfiguration struct { // `assuredConcurrencyShares` (ACS) configures the execution // limit, which is a limit on the number of requests of this - // priority level that may be exeucting at a given time. ACS must + // priority level that may be executing at a given time. ACS must // be a positive number. The server's concurrency limit (SCL) is // divided among the concurrency-controlled priority levels in // proportion to their assured concurrency shares. This produces diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go index 49a4178096..921122731a 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go @@ -122,7 +122,7 @@ func (LimitResponse) SwaggerDoc() map[string]string { var map_LimitedPriorityLevelConfiguration = map[string]string{ "": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n - How are requests for this priority level limited?\n - What should be done with requests that exceed the limit?", - "assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) ", + "assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be executing at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. 
This produces the assured concurrency value (ACV) ", "limitResponse": "`limitResponse` indicates what to do with requests that can not be executed right now", "lendablePercent": "`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. The value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\n\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )", "borrowingLimitPercent": "`borrowingLimitPercent`, if present, configures a limit on how many seats this priority level can borrow from other priority levels. The limit is known as this level's BorrowingConcurrencyLimit (BorrowingCL) and is a limit on the total number of seats that this level may borrow at any one time. This field holds the ratio of that limit to the level's nominal concurrency limit. When this field is non-nil, it must hold a non-negative integer and the limit is calculated as follows.\n\nBorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )\n\nThe value of this field can be more than 100, implying that this priority level can borrow a number of seats that is greater than its own nominal concurrency limit (NominalCL). When this field is left `nil`, the limit is effectively infinite.", diff --git a/vendor/k8s.io/api/flowcontrol/v1beta3/types.go b/vendor/k8s.io/api/flowcontrol/v1beta3/types.go index 810941557b..0ffc22a236 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta3/types.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta3/types.go @@ -103,10 +103,25 @@ const ( AutoUpdateAnnotationKey = "apf.kubernetes.io/autoupdate-spec" ) +const ( + // This annotation is only for use in v1beta3. + // + // The presence of this annotation in a v1beta3 object means that + // a zero value in the 'NominalConcurrencyShares' field means zero + // rather than the old default of 30. + // + // To set a zero value for the 'NominalConcurrencyShares' field in v1beta3, + // set the annotation to an empty string: + // "flowcontrol.k8s.io/v1beta3-preserve-zero-concurrency-shares": "" + // + PriorityLevelPreserveZeroConcurrencySharesKey = "flowcontrol.k8s.io/v1beta3-preserve-zero-concurrency-shares" +) + // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.26 +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1,FlowSchema // FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with // similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher". @@ -128,6 +143,7 @@ type FlowSchema struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.26 +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1,FlowSchemaList // FlowSchemaList is a list of FlowSchema objects. type FlowSchemaList struct { @@ -384,6 +400,7 @@ type FlowSchemaConditionType string // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.26 +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1,PriorityLevelConfiguration // PriorityLevelConfiguration represents the configuration of a priority level. 
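The LimitedPriorityLevelConfiguration doc comments above define the lendable and borrowing limits purely in terms of a level's nominal concurrency limit: LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ) and BorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 ), with a nil borrowingLimitPercent meaning an effectively infinite borrowing limit. The Go sketch below only restates that arithmetic for reference; the helper names are illustrative and are not part of the vendored Kubernetes packages.

package main

import (
	"fmt"
	"math"
)

// lendableCL restates LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )
// from the field documentation above. Illustrative helper, not part of k8s.io/api.
func lendableCL(nominalCL, lendablePercent int32) int32 {
	return int32(math.Round(float64(nominalCL) * float64(lendablePercent) / 100.0))
}

// borrowingCL restates BorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 ).
// A nil borrowingLimitPercent is reported as unbounded, matching the
// "effectively infinite" wording in the field description.
func borrowingCL(nominalCL int32, borrowingLimitPercent *int32) (limit int32, bounded bool) {
	if borrowingLimitPercent == nil {
		return 0, false
	}
	return int32(math.Round(float64(nominalCL) * float64(*borrowingLimitPercent) / 100.0)), true
}

func main() {
	nominal := int32(30)
	pct := int32(150) // values above 100 are allowed: a level may borrow more seats than its own NominalCL
	fmt.Println(lendableCL(nominal, 50)) // 15 of the 30 nominal seats are lendable
	if cl, ok := borrowingCL(nominal, &pct); ok {
		fmt.Println(cl) // at most 45 seats may be borrowed at any one time
	}
}

Since lendablePercent defaults to 0, no seats are lendable unless it is set; the sketch uses 50 percent only to make the rounding visible.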
type PriorityLevelConfiguration struct { @@ -404,6 +421,7 @@ type PriorityLevelConfiguration struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.26 +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1,PriorityLevelConfigurationList // PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects. type PriorityLevelConfigurationList struct { diff --git a/vendor/k8s.io/api/flowcontrol/v1beta3/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/flowcontrol/v1beta3/zz_generated.prerelease-lifecycle.go index 24b7613850..7e46a1469d 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta3/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta3/zz_generated.prerelease-lifecycle.go @@ -21,6 +21,10 @@ limitations under the License. package v1beta3 +import ( + schema "k8s.io/apimachinery/pkg/runtime/schema" +) + // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. func (in *FlowSchema) APILifecycleIntroduced() (major, minor int) { @@ -33,6 +37,12 @@ func (in *FlowSchema) APILifecycleDeprecated() (major, minor int) { return 1, 29 } +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=<group>,<version>,<kind>" tags in types.go. +func (in *FlowSchema) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1", Kind: "FlowSchema"} +} + // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *FlowSchema) APILifecycleRemoved() (major, minor int) { @@ -51,6 +61,12 @@ func (in *FlowSchemaList) APILifecycleDeprecated() (major, minor int) { return 1, 29 } +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=<group>,<version>,<kind>" tags in types.go. +func (in *FlowSchemaList) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1", Kind: "FlowSchemaList"} +} + // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *FlowSchemaList) APILifecycleRemoved() (major, minor int) { @@ -69,6 +85,12 @@ func (in *PriorityLevelConfiguration) APILifecycleDeprecated() (major, minor int return 1, 29 } +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=<group>,<version>,<kind>" tags in types.go. 
+func (in *PriorityLevelConfiguration) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1", Kind: "PriorityLevelConfiguration"} +} + // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *PriorityLevelConfiguration) APILifecycleRemoved() (major, minor int) { @@ -87,6 +109,12 @@ func (in *PriorityLevelConfigurationList) APILifecycleDeprecated() (major, minor return 1, 29 } +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=<group>,<version>,<kind>" tags in types.go. +func (in *PriorityLevelConfigurationList) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1", Kind: "PriorityLevelConfigurationList"} +} + // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *PriorityLevelConfigurationList) APILifecycleRemoved() (major, minor int) { diff --git a/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go b/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go index f54d1f8242..949ea513fe 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go @@ -25,14 +25,12 @@ import ( io "io" proto "github.com/gogo/protobuf/proto" - v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" math "math" math_bits "math/bits" reflect "reflect" strings "strings" - - k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" ) // Reference imports to suppress errors if they are not otherwise used. @@ -46,15 +44,15 @@ var _ = math.Inf // proto package needs to be updated. 
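The hunks above add APILifecycleReplacement accessors next to the existing APILifecycleIntroduced/Deprecated/Removed ones, so callers can discover that the v1beta3 flowcontrol kinds are superseded by flowcontrol.apiserver.k8s.io/v1. A minimal consumer sketch, assuming the vendored k8s.io/api/flowcontrol/v1beta3 package and the GroupVersionKind String method from apimachinery:

package main

import (
	"fmt"

	flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3"
)

func main() {
	var fs flowcontrolv1beta3.FlowSchema

	// Generated lifecycle accessors extended by the diff above.
	depMajor, depMinor := fs.APILifecycleDeprecated() // 1, 29 per the generated code
	gvk := fs.APILifecycleReplacement()               // flowcontrol.apiserver.k8s.io, v1, FlowSchema
	remMajor, remMinor := fs.APILifecycleRemoved()

	fmt.Printf("FlowSchema v1beta3: deprecated in %d.%d, no longer served from %d.%d, replaced by %s\n",
		depMajor, depMinor, remMajor, remMinor, gvk.String())
}

The exact removal release is not visible in these hunks, so the sketch simply prints whatever the generated accessor returns.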
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -func (m *ClusterCIDR) Reset() { *m = ClusterCIDR{} } -func (*ClusterCIDR) ProtoMessage() {} -func (*ClusterCIDR) Descriptor() ([]byte, []int) { +func (m *IPAddress) Reset() { *m = IPAddress{} } +func (*IPAddress) ProtoMessage() {} +func (*IPAddress) Descriptor() ([]byte, []int) { return fileDescriptor_c1b7ac8d7d97acec, []int{0} } -func (m *ClusterCIDR) XXX_Unmarshal(b []byte) error { +func (m *IPAddress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ClusterCIDR) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *IPAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -62,27 +60,27 @@ func (m *ClusterCIDR) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) } return b[:n], nil } -func (m *ClusterCIDR) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterCIDR.Merge(m, src) +func (m *IPAddress) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPAddress.Merge(m, src) } -func (m *ClusterCIDR) XXX_Size() int { +func (m *IPAddress) XXX_Size() int { return m.Size() } -func (m *ClusterCIDR) XXX_DiscardUnknown() { - xxx_messageInfo_ClusterCIDR.DiscardUnknown(m) +func (m *IPAddress) XXX_DiscardUnknown() { + xxx_messageInfo_IPAddress.DiscardUnknown(m) } -var xxx_messageInfo_ClusterCIDR proto.InternalMessageInfo +var xxx_messageInfo_IPAddress proto.InternalMessageInfo -func (m *ClusterCIDRList) Reset() { *m = ClusterCIDRList{} } -func (*ClusterCIDRList) ProtoMessage() {} -func (*ClusterCIDRList) Descriptor() ([]byte, []int) { +func (m *IPAddressList) Reset() { *m = IPAddressList{} } +func (*IPAddressList) ProtoMessage() {} +func (*IPAddressList) Descriptor() ([]byte, []int) { return fileDescriptor_c1b7ac8d7d97acec, []int{1} } -func (m *ClusterCIDRList) XXX_Unmarshal(b []byte) error { +func (m *IPAddressList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ClusterCIDRList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *IPAddressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -90,27 +88,27 @@ func (m *ClusterCIDRList) XXX_Marshal(b []byte, deterministic bool) ([]byte, err } return b[:n], nil } -func (m *ClusterCIDRList) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterCIDRList.Merge(m, src) +func (m *IPAddressList) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPAddressList.Merge(m, src) } -func (m *ClusterCIDRList) XXX_Size() int { +func (m *IPAddressList) XXX_Size() int { return m.Size() } -func (m *ClusterCIDRList) XXX_DiscardUnknown() { - xxx_messageInfo_ClusterCIDRList.DiscardUnknown(m) +func (m *IPAddressList) XXX_DiscardUnknown() { + xxx_messageInfo_IPAddressList.DiscardUnknown(m) } -var xxx_messageInfo_ClusterCIDRList proto.InternalMessageInfo +var xxx_messageInfo_IPAddressList proto.InternalMessageInfo -func (m *ClusterCIDRSpec) Reset() { *m = ClusterCIDRSpec{} } -func (*ClusterCIDRSpec) ProtoMessage() {} -func (*ClusterCIDRSpec) Descriptor() ([]byte, []int) { +func (m *IPAddressSpec) Reset() { *m = IPAddressSpec{} } +func (*IPAddressSpec) ProtoMessage() {} +func (*IPAddressSpec) Descriptor() ([]byte, []int) { return fileDescriptor_c1b7ac8d7d97acec, []int{2} } -func (m *ClusterCIDRSpec) XXX_Unmarshal(b []byte) error { +func (m *IPAddressSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ClusterCIDRSpec) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { +func (m *IPAddressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -118,27 +116,27 @@ func (m *ClusterCIDRSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, err } return b[:n], nil } -func (m *ClusterCIDRSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterCIDRSpec.Merge(m, src) +func (m *IPAddressSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPAddressSpec.Merge(m, src) } -func (m *ClusterCIDRSpec) XXX_Size() int { +func (m *IPAddressSpec) XXX_Size() int { return m.Size() } -func (m *ClusterCIDRSpec) XXX_DiscardUnknown() { - xxx_messageInfo_ClusterCIDRSpec.DiscardUnknown(m) +func (m *IPAddressSpec) XXX_DiscardUnknown() { + xxx_messageInfo_IPAddressSpec.DiscardUnknown(m) } -var xxx_messageInfo_ClusterCIDRSpec proto.InternalMessageInfo +var xxx_messageInfo_IPAddressSpec proto.InternalMessageInfo -func (m *IPAddress) Reset() { *m = IPAddress{} } -func (*IPAddress) ProtoMessage() {} -func (*IPAddress) Descriptor() ([]byte, []int) { +func (m *ParentReference) Reset() { *m = ParentReference{} } +func (*ParentReference) ProtoMessage() {} +func (*ParentReference) Descriptor() ([]byte, []int) { return fileDescriptor_c1b7ac8d7d97acec, []int{3} } -func (m *IPAddress) XXX_Unmarshal(b []byte) error { +func (m *ParentReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *IPAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ParentReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -146,27 +144,27 @@ func (m *IPAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { } return b[:n], nil } -func (m *IPAddress) XXX_Merge(src proto.Message) { - xxx_messageInfo_IPAddress.Merge(m, src) +func (m *ParentReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ParentReference.Merge(m, src) } -func (m *IPAddress) XXX_Size() int { +func (m *ParentReference) XXX_Size() int { return m.Size() } -func (m *IPAddress) XXX_DiscardUnknown() { - xxx_messageInfo_IPAddress.DiscardUnknown(m) +func (m *ParentReference) XXX_DiscardUnknown() { + xxx_messageInfo_ParentReference.DiscardUnknown(m) } -var xxx_messageInfo_IPAddress proto.InternalMessageInfo +var xxx_messageInfo_ParentReference proto.InternalMessageInfo -func (m *IPAddressList) Reset() { *m = IPAddressList{} } -func (*IPAddressList) ProtoMessage() {} -func (*IPAddressList) Descriptor() ([]byte, []int) { +func (m *ServiceCIDR) Reset() { *m = ServiceCIDR{} } +func (*ServiceCIDR) ProtoMessage() {} +func (*ServiceCIDR) Descriptor() ([]byte, []int) { return fileDescriptor_c1b7ac8d7d97acec, []int{4} } -func (m *IPAddressList) XXX_Unmarshal(b []byte) error { +func (m *ServiceCIDR) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *IPAddressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ServiceCIDR) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -174,27 +172,27 @@ func (m *IPAddressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error } return b[:n], nil } -func (m *IPAddressList) XXX_Merge(src proto.Message) { - xxx_messageInfo_IPAddressList.Merge(m, src) +func (m *ServiceCIDR) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceCIDR.Merge(m, src) } -func (m *IPAddressList) XXX_Size() int { +func (m *ServiceCIDR) XXX_Size() int { return 
m.Size() } -func (m *IPAddressList) XXX_DiscardUnknown() { - xxx_messageInfo_IPAddressList.DiscardUnknown(m) +func (m *ServiceCIDR) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceCIDR.DiscardUnknown(m) } -var xxx_messageInfo_IPAddressList proto.InternalMessageInfo +var xxx_messageInfo_ServiceCIDR proto.InternalMessageInfo -func (m *IPAddressSpec) Reset() { *m = IPAddressSpec{} } -func (*IPAddressSpec) ProtoMessage() {} -func (*IPAddressSpec) Descriptor() ([]byte, []int) { +func (m *ServiceCIDRList) Reset() { *m = ServiceCIDRList{} } +func (*ServiceCIDRList) ProtoMessage() {} +func (*ServiceCIDRList) Descriptor() ([]byte, []int) { return fileDescriptor_c1b7ac8d7d97acec, []int{5} } -func (m *IPAddressSpec) XXX_Unmarshal(b []byte) error { +func (m *ServiceCIDRList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *IPAddressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ServiceCIDRList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -202,27 +200,27 @@ func (m *IPAddressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error } return b[:n], nil } -func (m *IPAddressSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_IPAddressSpec.Merge(m, src) +func (m *ServiceCIDRList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceCIDRList.Merge(m, src) } -func (m *IPAddressSpec) XXX_Size() int { +func (m *ServiceCIDRList) XXX_Size() int { return m.Size() } -func (m *IPAddressSpec) XXX_DiscardUnknown() { - xxx_messageInfo_IPAddressSpec.DiscardUnknown(m) +func (m *ServiceCIDRList) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceCIDRList.DiscardUnknown(m) } -var xxx_messageInfo_IPAddressSpec proto.InternalMessageInfo +var xxx_messageInfo_ServiceCIDRList proto.InternalMessageInfo -func (m *ParentReference) Reset() { *m = ParentReference{} } -func (*ParentReference) ProtoMessage() {} -func (*ParentReference) Descriptor() ([]byte, []int) { +func (m *ServiceCIDRSpec) Reset() { *m = ServiceCIDRSpec{} } +func (*ServiceCIDRSpec) ProtoMessage() {} +func (*ServiceCIDRSpec) Descriptor() ([]byte, []int) { return fileDescriptor_c1b7ac8d7d97acec, []int{6} } -func (m *ParentReference) XXX_Unmarshal(b []byte) error { +func (m *ServiceCIDRSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ParentReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ServiceCIDRSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -230,26 +228,55 @@ func (m *ParentReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, err } return b[:n], nil } -func (m *ParentReference) XXX_Merge(src proto.Message) { - xxx_messageInfo_ParentReference.Merge(m, src) +func (m *ServiceCIDRSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceCIDRSpec.Merge(m, src) } -func (m *ParentReference) XXX_Size() int { +func (m *ServiceCIDRSpec) XXX_Size() int { return m.Size() } -func (m *ParentReference) XXX_DiscardUnknown() { - xxx_messageInfo_ParentReference.DiscardUnknown(m) +func (m *ServiceCIDRSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceCIDRSpec.DiscardUnknown(m) } -var xxx_messageInfo_ParentReference proto.InternalMessageInfo +var xxx_messageInfo_ServiceCIDRSpec proto.InternalMessageInfo + +func (m *ServiceCIDRStatus) Reset() { *m = ServiceCIDRStatus{} } +func (*ServiceCIDRStatus) ProtoMessage() {} +func (*ServiceCIDRStatus) Descriptor() ([]byte, []int) { + return 
fileDescriptor_c1b7ac8d7d97acec, []int{7} +} +func (m *ServiceCIDRStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceCIDRStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceCIDRStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceCIDRStatus.Merge(m, src) +} +func (m *ServiceCIDRStatus) XXX_Size() int { + return m.Size() +} +func (m *ServiceCIDRStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceCIDRStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceCIDRStatus proto.InternalMessageInfo func init() { - proto.RegisterType((*ClusterCIDR)(nil), "k8s.io.api.networking.v1alpha1.ClusterCIDR") - proto.RegisterType((*ClusterCIDRList)(nil), "k8s.io.api.networking.v1alpha1.ClusterCIDRList") - proto.RegisterType((*ClusterCIDRSpec)(nil), "k8s.io.api.networking.v1alpha1.ClusterCIDRSpec") proto.RegisterType((*IPAddress)(nil), "k8s.io.api.networking.v1alpha1.IPAddress") proto.RegisterType((*IPAddressList)(nil), "k8s.io.api.networking.v1alpha1.IPAddressList") proto.RegisterType((*IPAddressSpec)(nil), "k8s.io.api.networking.v1alpha1.IPAddressSpec") proto.RegisterType((*ParentReference)(nil), "k8s.io.api.networking.v1alpha1.ParentReference") + proto.RegisterType((*ServiceCIDR)(nil), "k8s.io.api.networking.v1alpha1.ServiceCIDR") + proto.RegisterType((*ServiceCIDRList)(nil), "k8s.io.api.networking.v1alpha1.ServiceCIDRList") + proto.RegisterType((*ServiceCIDRSpec)(nil), "k8s.io.api.networking.v1alpha1.ServiceCIDRSpec") + proto.RegisterType((*ServiceCIDRStatus)(nil), "k8s.io.api.networking.v1alpha1.ServiceCIDRStatus") } func init() { @@ -257,54 +284,51 @@ func init() { } var fileDescriptor_c1b7ac8d7d97acec = []byte{ - // 698 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x95, 0xcf, 0x4e, 0xdb, 0x4a, - 0x14, 0xc6, 0x63, 0x92, 0x48, 0x78, 0x00, 0x85, 0xeb, 0xcd, 0x8d, 0x58, 0x38, 0xb9, 0xb9, 0x1b, - 0xae, 0x6e, 0x19, 0x03, 0x42, 0x51, 0xb7, 0x98, 0x48, 0x34, 0x52, 0x0b, 0xe9, 0x20, 0xba, 0xa8, - 0x58, 0xd4, 0xb1, 0x0f, 0x8e, 0x1b, 0xfc, 0x47, 0x33, 0xe3, 0x54, 0xec, 0xfa, 0x08, 0x7d, 0xa1, - 0x56, 0x6a, 0x57, 0x2c, 0x59, 0xb2, 0x8a, 0x8a, 0xfb, 0x02, 0x5d, 0xb7, 0x9b, 0x6a, 0x26, 0x4e, - 0xec, 0x24, 0x0d, 0xd0, 0x0d, 0xbb, 0xcc, 0x39, 0xbf, 0xf3, 0xcd, 0x39, 0x73, 0xbe, 0x24, 0xe8, - 0xb0, 0xff, 0x94, 0x61, 0x2f, 0x34, 0xfa, 0x71, 0x17, 0x68, 0x00, 0x1c, 0x98, 0x31, 0x80, 0xc0, - 0x09, 0xa9, 0x91, 0x26, 0xac, 0xc8, 0x33, 0x02, 0xe0, 0xef, 0x42, 0xda, 0xf7, 0x02, 0xd7, 0x18, - 0xec, 0x58, 0x17, 0x51, 0xcf, 0xda, 0x31, 0x5c, 0x08, 0x80, 0x5a, 0x1c, 0x1c, 0x1c, 0xd1, 0x90, - 0x87, 0x9a, 0x3e, 0xe2, 0xb1, 0x15, 0x79, 0x38, 0xe3, 0xf1, 0x98, 0xdf, 0xd8, 0x72, 0x3d, 0xde, - 0x8b, 0xbb, 0xd8, 0x0e, 0x7d, 0xc3, 0x0d, 0xdd, 0xd0, 0x90, 0x65, 0xdd, 0xf8, 0x5c, 0x9e, 0xe4, - 0x41, 0x7e, 0x1a, 0xc9, 0x6d, 0x34, 0x72, 0xd7, 0xdb, 0x21, 0x05, 0x63, 0x30, 0x77, 0xe5, 0xc6, - 0x5e, 0xc6, 0xf8, 0x96, 0xdd, 0xf3, 0x02, 0xa0, 0x97, 0x46, 0xd4, 0x77, 0x45, 0x80, 0x19, 0x3e, - 0x70, 0xeb, 0x77, 0x55, 0xc6, 0xa2, 0x2a, 0x1a, 0x07, 0xdc, 0xf3, 0x61, 0xae, 0xa0, 0x79, 0x5f, - 0x01, 0xb3, 0x7b, 0xe0, 0x5b, 0xb3, 0x75, 0x8d, 0x2f, 0x0a, 0x5a, 0x39, 0xb8, 0x88, 0x19, 0x07, - 0x7a, 0xd0, 0x6e, 0x11, 0xed, 0x0d, 0x5a, 0x16, 0x3d, 0x39, 0x16, 0xb7, 0xaa, 0x4a, 0x5d, 0xd9, - 0x5c, 0xd9, 0xdd, 0xc6, 0xd9, 0xa3, 0x4d, 0xa4, 0x71, 0xd4, 0x77, 0x45, 0x80, 0x61, 0x41, 0xe3, - 0xc1, 0x0e, 
0x3e, 0xee, 0xbe, 0x05, 0x9b, 0xbf, 0x00, 0x6e, 0x99, 0xda, 0xd5, 0xb0, 0x56, 0x48, - 0x86, 0x35, 0x94, 0xc5, 0xc8, 0x44, 0x55, 0x7b, 0x89, 0x4a, 0x2c, 0x02, 0xbb, 0xba, 0x24, 0xd5, - 0x0d, 0x7c, 0xf7, 0x4a, 0x70, 0xae, 0xb9, 0x93, 0x08, 0x6c, 0x73, 0x35, 0x15, 0x2f, 0x89, 0x13, - 0x91, 0x52, 0x8d, 0xcf, 0x0a, 0xaa, 0xe4, 0xb8, 0xe7, 0x1e, 0xe3, 0xda, 0xd9, 0xdc, 0x20, 0xf8, - 0x61, 0x83, 0x88, 0x6a, 0x39, 0xc6, 0x7a, 0x7a, 0xd3, 0xf2, 0x38, 0x92, 0x1b, 0xa2, 0x83, 0xca, - 0x1e, 0x07, 0x9f, 0x55, 0x97, 0xea, 0xc5, 0xcd, 0x95, 0xdd, 0xff, 0xff, 0x60, 0x0a, 0x73, 0x2d, - 0xd5, 0x2d, 0xb7, 0x85, 0x02, 0x19, 0x09, 0x35, 0xbe, 0x4f, 0xcf, 0x20, 0xa6, 0xd3, 0x5e, 0xa1, - 0xd5, 0x20, 0x74, 0xe0, 0x04, 0x2e, 0xc0, 0xe6, 0x21, 0x4d, 0xe7, 0xa8, 0xe7, 0x2f, 0x13, 0xb6, - 0x13, 0x5d, 0x1f, 0xe5, 0x38, 0x73, 0x3d, 0x19, 0xd6, 0x56, 0xf3, 0x11, 0x32, 0xa5, 0xa3, 0xed, - 0xa3, 0x4a, 0x04, 0x54, 0x00, 0xcf, 0x42, 0xc6, 0x4d, 0x8f, 0x33, 0xb9, 0x8d, 0xb2, 0xf9, 0x77, - 0xda, 0x5a, 0xa5, 0x33, 0x9d, 0x26, 0xb3, 0xbc, 0x56, 0x47, 0x25, 0x2f, 0x1a, 0xec, 0x55, 0x8b, - 0x75, 0x65, 0x53, 0xcd, 0x96, 0xd2, 0xee, 0x0c, 0xf6, 0x88, 0xcc, 0xa4, 0x44, 0xb3, 0x5a, 0x9a, - 0x23, 0x9a, 0x92, 0x68, 0x36, 0x3e, 0x29, 0x48, 0x6d, 0x77, 0xf6, 0x1d, 0x87, 0x02, 0x63, 0x8f, - 0xe0, 0xbc, 0xe3, 0x29, 0xe7, 0x6d, 0xdd, 0xb7, 0xb3, 0x49, 0x6b, 0x0b, 0x7d, 0xf7, 0x51, 0x41, - 0x6b, 0x13, 0xea, 0x11, 0x5c, 0x77, 0x34, 0xed, 0xba, 0xff, 0x1e, 0x3c, 0xc1, 0x02, 0xcf, 0xf9, - 0xb9, 0xf6, 0xa5, 0xe1, 0xce, 0x90, 0x1a, 0x59, 0x14, 0x02, 0x4e, 0xe0, 0x3c, 0xed, 0xff, 0xde, - 0x2f, 0x68, 0x67, 0x5c, 0x00, 0x14, 0x02, 0x1b, 0xcc, 0xb5, 0x64, 0x58, 0x53, 0x27, 0x41, 0x92, - 0x09, 0x36, 0x7e, 0x2a, 0xa8, 0x32, 0x43, 0x6b, 0xff, 0xa2, 0xb2, 0x4b, 0xc3, 0x38, 0x92, 0xb7, - 0xa9, 0x59, 0x9f, 0x87, 0x22, 0x48, 0x46, 0x39, 0xed, 0x09, 0x5a, 0xa6, 0xc0, 0xc2, 0x98, 0xda, - 0x20, 0x97, 0xa7, 0x66, 0xaf, 0x44, 0xd2, 0x38, 0x99, 0x10, 0x9a, 0x81, 0xd4, 0xc0, 0xf2, 0x81, - 0x45, 0x96, 0x0d, 0xa9, 0x3f, 0xff, 0x4a, 0x71, 0xf5, 0x68, 0x9c, 0x20, 0x19, 0x23, 0x9c, 0x2a, - 0x0e, 0xb3, 0x4e, 0x15, 0x2c, 0x91, 0x19, 0xcd, 0x44, 0xc5, 0xd8, 0x73, 0xaa, 0x65, 0x09, 0x6c, - 0xa7, 0x40, 0xf1, 0xb4, 0xdd, 0xfa, 0x31, 0xac, 0xfd, 0xb3, 0xe8, 0x97, 0x97, 0x5f, 0x46, 0xc0, - 0xf0, 0x69, 0xbb, 0x45, 0x44, 0xb1, 0xd9, 0xba, 0xba, 0xd5, 0x0b, 0xd7, 0xb7, 0x7a, 0xe1, 0xe6, - 0x56, 0x2f, 0xbc, 0x4f, 0x74, 0xe5, 0x2a, 0xd1, 0x95, 0xeb, 0x44, 0x57, 0x6e, 0x12, 0x5d, 0xf9, - 0x9a, 0xe8, 0xca, 0x87, 0x6f, 0x7a, 0xe1, 0xb5, 0x7e, 0xf7, 0x3f, 0xda, 0xaf, 0x00, 0x00, 0x00, - 0xff, 0xff, 0xf9, 0x9d, 0x9e, 0xc6, 0x0b, 0x07, 0x00, 0x00, -} - -func (m *ClusterCIDR) Marshal() (dAtA []byte, err error) { + // 648 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcd, 0x6e, 0xd3, 0x4c, + 0x14, 0x8d, 0xdb, 0xa4, 0xaa, 0x27, 0x5f, 0xbf, 0x52, 0xaf, 0xa2, 0x2e, 0x9c, 0x28, 0x6c, 0x8a, + 0xa0, 0x33, 0x24, 0x42, 0x88, 0x2d, 0x6e, 0xa5, 0xaa, 0x12, 0xb4, 0x65, 0xba, 0x02, 0x75, 0xc1, + 0xc4, 0xbe, 0x75, 0x4c, 0xf0, 0x8f, 0x66, 0xc6, 0x01, 0x76, 0x3c, 0x02, 0x2f, 0xc0, 0x73, 0xb0, + 0x02, 0x89, 0x5d, 0x97, 0x5d, 0x76, 0x55, 0x51, 0xf3, 0x22, 0x68, 0xc6, 0x8e, 0x9d, 0x34, 0xea, + 0xdf, 0xa6, 0x3b, 0xcf, 0xb9, 0xe7, 0x9c, 0xb9, 0xe7, 0xce, 0x8c, 0x8c, 0x76, 0x46, 0x2f, 0x04, + 0x0e, 0x62, 0x32, 0x4a, 0x07, 0xc0, 0x23, 0x90, 0x20, 0xc8, 0x18, 0x22, 0x2f, 0xe6, 0xa4, 0x28, + 0xb0, 0x24, 0x20, 0x11, 0xc8, 0x4f, 0x31, 0x1f, 0x05, 0x91, 0x4f, 0xc6, 0x3d, 0xf6, 0x31, 0x19, + 0xb2, 0x1e, 0xf1, 0x21, 0x02, 0xce, 0x24, 0x78, 0x38, 0xe1, 
0xb1, 0x8c, 0x2d, 0x3b, 0xe7, 0x63, + 0x96, 0x04, 0xb8, 0xe2, 0xe3, 0x09, 0x7f, 0x7d, 0xd3, 0x0f, 0xe4, 0x30, 0x1d, 0x60, 0x37, 0x0e, + 0x89, 0x1f, 0xfb, 0x31, 0xd1, 0xb2, 0x41, 0x7a, 0xac, 0x57, 0x7a, 0xa1, 0xbf, 0x72, 0xbb, 0xf5, + 0x67, 0xd5, 0xf6, 0x21, 0x73, 0x87, 0x41, 0x04, 0xfc, 0x0b, 0x49, 0x46, 0xbe, 0x02, 0x04, 0x09, + 0x41, 0x32, 0x32, 0x9e, 0x6b, 0x62, 0x9d, 0x5c, 0xa5, 0xe2, 0x69, 0x24, 0x83, 0x10, 0xe6, 0x04, + 0xcf, 0x6f, 0x12, 0x08, 0x77, 0x08, 0x21, 0xbb, 0xac, 0xeb, 0xfe, 0x32, 0x90, 0xb9, 0x7b, 0xf0, + 0xd2, 0xf3, 0x38, 0x08, 0x61, 0xbd, 0x47, 0xcb, 0xaa, 0x23, 0x8f, 0x49, 0xd6, 0x32, 0x3a, 0xc6, + 0x46, 0xb3, 0xff, 0x14, 0x57, 0xe3, 0x28, 0x8d, 0x71, 0x32, 0xf2, 0x15, 0x20, 0xb0, 0x62, 0xe3, + 0x71, 0x0f, 0xef, 0x0f, 0x3e, 0x80, 0x2b, 0x5f, 0x83, 0x64, 0x8e, 0x75, 0x72, 0xde, 0xae, 0x65, + 0xe7, 0x6d, 0x54, 0x61, 0xb4, 0x74, 0xb5, 0xf6, 0x51, 0x5d, 0x24, 0xe0, 0xb6, 0x16, 0xb4, 0xfb, + 0x26, 0xbe, 0x7e, 0xd8, 0xb8, 0x6c, 0xed, 0x30, 0x01, 0xd7, 0xf9, 0xaf, 0xb0, 0xae, 0xab, 0x15, + 0xd5, 0x46, 0xdd, 0x9f, 0x06, 0x5a, 0x29, 0x59, 0xaf, 0x02, 0x21, 0xad, 0xa3, 0xb9, 0x10, 0xf8, + 0x76, 0x21, 0x94, 0x5a, 0x47, 0x78, 0x50, 0xec, 0xb3, 0x3c, 0x41, 0xa6, 0x02, 0xec, 0xa1, 0x46, + 0x20, 0x21, 0x14, 0xad, 0x85, 0xce, 0xe2, 0x46, 0xb3, 0xff, 0xe8, 0xd6, 0x09, 0x9c, 0x95, 0xc2, + 0xb5, 0xb1, 0xab, 0xf4, 0x34, 0xb7, 0xe9, 0x86, 0x53, 0xed, 0xab, 0x58, 0xd6, 0x11, 0x32, 0x13, + 0xc6, 0x21, 0x92, 0x14, 0x8e, 0x8b, 0xfe, 0xc9, 0x4d, 0x9b, 0x1c, 0x4c, 0x04, 0xc0, 0x21, 0x72, + 0xc1, 0x59, 0xc9, 0xce, 0xdb, 0x66, 0x09, 0xd2, 0xca, 0xb0, 0xfb, 0xc3, 0x40, 0xab, 0x97, 0xd8, + 0xd6, 0x43, 0xd4, 0xf0, 0x79, 0x9c, 0x26, 0x7a, 0x37, 0xb3, 0xea, 0x73, 0x47, 0x81, 0x34, 0xaf, + 0x59, 0x4f, 0xd0, 0x32, 0x07, 0x11, 0xa7, 0xdc, 0x05, 0x7d, 0x78, 0x66, 0x35, 0x25, 0x5a, 0xe0, + 0xb4, 0x64, 0x58, 0x04, 0x99, 0x11, 0x0b, 0x41, 0x24, 0xcc, 0x85, 0xd6, 0xa2, 0xa6, 0xaf, 0x15, + 0x74, 0x73, 0x6f, 0x52, 0xa0, 0x15, 0xc7, 0xea, 0xa0, 0xba, 0x5a, 0xb4, 0xea, 0x9a, 0x5b, 0x1e, + 0xb4, 0xe2, 0x52, 0x5d, 0xe9, 0x7e, 0x5f, 0x40, 0xcd, 0x43, 0xe0, 0xe3, 0xc0, 0x85, 0xad, 0xdd, + 0x6d, 0x7a, 0x0f, 0x77, 0xf5, 0xcd, 0xcc, 0x5d, 0xbd, 0xf1, 0x10, 0xa6, 0x9a, 0xbb, 0xea, 0xb6, + 0x5a, 0x6f, 0xd1, 0x92, 0x90, 0x4c, 0xa6, 0x42, 0x0f, 0xa5, 0xd9, 0xef, 0xdd, 0xc5, 0x54, 0x0b, + 0x9d, 0xff, 0x0b, 0xdb, 0xa5, 0x7c, 0x4d, 0x0b, 0xc3, 0xee, 0x6f, 0x03, 0xad, 0x4e, 0xb1, 0xef, + 0xe1, 0x29, 0x1c, 0xcc, 0x3e, 0x85, 0xc7, 0x77, 0xc8, 0x72, 0xc5, 0x63, 0xe8, 0xcf, 0x44, 0xd0, + 0xcf, 0xa1, 0x8d, 0x1a, 0x6e, 0xe0, 0x71, 0xd1, 0x32, 0x3a, 0x8b, 0x1b, 0xa6, 0x63, 0x2a, 0x8d, + 0x2a, 0x0a, 0x9a, 0xe3, 0xdd, 0xcf, 0x68, 0x6d, 0x6e, 0x48, 0x96, 0x8b, 0x90, 0x1b, 0x47, 0x5e, + 0x20, 0x83, 0x38, 0xca, 0xa5, 0xb3, 0x07, 0x78, 0x4d, 0xf4, 0xad, 0x89, 0xae, 0xba, 0x1d, 0x25, + 0x24, 0xe8, 0x94, 0xad, 0xb3, 0x7d, 0x72, 0x61, 0xd7, 0x4e, 0x2f, 0xec, 0xda, 0xd9, 0x85, 0x5d, + 0xfb, 0x9a, 0xd9, 0xc6, 0x49, 0x66, 0x1b, 0xa7, 0x99, 0x6d, 0x9c, 0x65, 0xb6, 0xf1, 0x27, 0xb3, + 0x8d, 0x6f, 0x7f, 0xed, 0xda, 0x3b, 0xfb, 0xfa, 0xff, 0xcf, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, + 0x29, 0x82, 0x11, 0x57, 0xb9, 0x06, 0x00, 0x00, +} + +func (m *IPAddress) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -314,12 +338,12 @@ func (m *ClusterCIDR) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ClusterCIDR) MarshalTo(dAtA []byte) (int, error) { +func (m *IPAddress) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return 
m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ClusterCIDR) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *IPAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -347,7 +371,7 @@ func (m *ClusterCIDR) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ClusterCIDRList) Marshal() (dAtA []byte, err error) { +func (m *IPAddressList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -357,12 +381,12 @@ func (m *ClusterCIDRList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ClusterCIDRList) MarshalTo(dAtA []byte) (int, error) { +func (m *IPAddressList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ClusterCIDRList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *IPAddressList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -394,7 +418,7 @@ func (m *ClusterCIDRList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ClusterCIDRSpec) Marshal() (dAtA []byte, err error) { +func (m *IPAddressSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -404,32 +428,19 @@ func (m *ClusterCIDRSpec) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ClusterCIDRSpec) MarshalTo(dAtA []byte) (int, error) { +func (m *IPAddressSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ClusterCIDRSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *IPAddressSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i -= len(m.IPv6) - copy(dAtA[i:], m.IPv6) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IPv6))) - i-- - dAtA[i] = 0x22 - i -= len(m.IPv4) - copy(dAtA[i:], m.IPv4) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IPv4))) - i-- - dAtA[i] = 0x1a - i = encodeVarintGenerated(dAtA, i, uint64(m.PerNodeHostBits)) - i-- - dAtA[i] = 0x10 - if m.NodeSelector != nil { + if m.ParentRef != nil { { - size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ParentRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -442,7 +453,7 @@ func (m *ClusterCIDRSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *IPAddress) Marshal() (dAtA []byte, err error) { +func (m *ParentReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -452,16 +463,69 @@ func (m *IPAddress) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *IPAddress) MarshalTo(dAtA []byte) (int, error) { +func (m *ParentReference) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *IPAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ParentReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x22 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x1a + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ServiceCIDR) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceCIDR) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceCIDR) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a { size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -485,7 +549,7 @@ func (m *IPAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *IPAddressList) Marshal() (dAtA []byte, err error) { +func (m *ServiceCIDRList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -495,12 +559,12 @@ func (m *IPAddressList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *IPAddressList) MarshalTo(dAtA []byte) (int, error) { +func (m *ServiceCIDRList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *IPAddressList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ServiceCIDRList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -532,7 +596,7 @@ func (m *IPAddressList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *IPAddressSpec) Marshal() (dAtA []byte, err error) { +func (m *ServiceCIDRSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -542,32 +606,29 @@ func (m *IPAddressSpec) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *IPAddressSpec) MarshalTo(dAtA []byte) (int, error) { +func (m *ServiceCIDRSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *IPAddressSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ServiceCIDRSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.ParentRef != nil { - { - size, err := m.ParentRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if len(m.CIDRs) > 0 { + for iNdEx := len(m.CIDRs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.CIDRs[iNdEx]) + copy(dAtA[i:], m.CIDRs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDRs[iNdEx]))) + i-- + dAtA[i] = 0xa } - i-- - dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ParentReference) Marshal() (dAtA []byte, err error) { +func (m *ServiceCIDRStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -577,41 +638,30 @@ func (m *ParentReference) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ParentReference) MarshalTo(dAtA []byte) (int, error) { +func (m *ServiceCIDRStatus) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return 
m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ParentReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ServiceCIDRStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i -= len(m.UID) - copy(dAtA[i:], m.UID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i-- - dAtA[i] = 0x2a - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x22 - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i-- - dAtA[i] = 0x1a - i -= len(m.Resource) - copy(dAtA[i:], m.Resource) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) - i-- - dAtA[i] = 0x12 - i -= len(m.Group) - copy(dAtA[i:], m.Group) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i-- - dAtA[i] = 0xa + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } return len(dAtA) - i, nil } @@ -626,7 +676,7 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } -func (m *ClusterCIDR) Size() (n int) { +func (m *IPAddress) Size() (n int) { if m == nil { return 0 } @@ -639,7 +689,7 @@ func (m *ClusterCIDR) Size() (n int) { return n } -func (m *ClusterCIDRList) Size() (n int) { +func (m *IPAddressList) Size() (n int) { if m == nil { return 0 } @@ -656,25 +706,37 @@ func (m *ClusterCIDRList) Size() (n int) { return n } -func (m *ClusterCIDRSpec) Size() (n int) { +func (m *IPAddressSpec) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.NodeSelector != nil { - l = m.NodeSelector.Size() + if m.ParentRef != nil { + l = m.ParentRef.Size() n += 1 + l + sovGenerated(uint64(l)) } - n += 1 + sovGenerated(uint64(m.PerNodeHostBits)) - l = len(m.IPv4) + return n +} + +func (m *ParentReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.IPv6) + l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *IPAddress) Size() (n int) { +func (m *ServiceCIDR) Size() (n int) { if m == nil { return 0 } @@ -684,10 +746,12 @@ func (m *IPAddress) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = m.Spec.Size() n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *IPAddressList) Size() (n int) { +func (m *ServiceCIDRList) Size() (n int) { if m == nil { return 0 } @@ -704,35 +768,33 @@ func (m *IPAddressList) Size() (n int) { return n } -func (m *IPAddressSpec) Size() (n int) { +func (m *ServiceCIDRSpec) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.ParentRef != nil { - l = m.ParentRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.CIDRs) > 0 { + for _, s := range m.CIDRs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } return n } -func (m *ParentReference) Size() (n int) { +func (m *ServiceCIDRStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Group) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Resource) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Namespace) - n += 1 + l + 
sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.UID) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -742,93 +804,105 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (this *ClusterCIDR) String() string { +func (this *IPAddress) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&ClusterCIDR{`, + s := strings.Join([]string{`&IPAddress{`, `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterCIDRSpec", "ClusterCIDRSpec", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IPAddressSpec", "IPAddressSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *ClusterCIDRList) String() string { +func (this *IPAddressList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]ClusterCIDR{" + repeatedStringForItems := "[]IPAddress{" for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterCIDR", "ClusterCIDR", 1), `&`, ``, 1) + "," + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "IPAddress", "IPAddress", 1), `&`, ``, 1) + "," } repeatedStringForItems += "}" - s := strings.Join([]string{`&ClusterCIDRList{`, + s := strings.Join([]string{`&IPAddressList{`, `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s } -func (this *ClusterCIDRSpec) String() string { +func (this *IPAddressSpec) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&ClusterCIDRSpec{`, - `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v11.NodeSelector", 1) + `,`, - `PerNodeHostBits:` + fmt.Sprintf("%v", this.PerNodeHostBits) + `,`, - `IPv4:` + fmt.Sprintf("%v", this.IPv4) + `,`, - `IPv6:` + fmt.Sprintf("%v", this.IPv6) + `,`, + s := strings.Join([]string{`&IPAddressSpec{`, + `ParentRef:` + strings.Replace(this.ParentRef.String(), "ParentReference", "ParentReference", 1) + `,`, `}`, }, "") return s } -func (this *IPAddress) String() string { +func (this *ParentReference) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&IPAddress{`, + s := strings.Join([]string{`&ParentReference{`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceCIDR) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceCIDR{`, `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IPAddressSpec", "IPAddressSpec", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceCIDRSpec", "ServiceCIDRSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), 
"ServiceCIDRStatus", "ServiceCIDRStatus", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *IPAddressList) String() string { +func (this *ServiceCIDRList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]IPAddress{" + repeatedStringForItems := "[]ServiceCIDR{" for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "IPAddress", "IPAddress", 1), `&`, ``, 1) + "," + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ServiceCIDR", "ServiceCIDR", 1), `&`, ``, 1) + "," } repeatedStringForItems += "}" - s := strings.Join([]string{`&IPAddressList{`, + s := strings.Join([]string{`&ServiceCIDRList{`, `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s } -func (this *IPAddressSpec) String() string { +func (this *ServiceCIDRSpec) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&IPAddressSpec{`, - `ParentRef:` + strings.Replace(this.ParentRef.String(), "ParentReference", "ParentReference", 1) + `,`, + s := strings.Join([]string{`&ServiceCIDRSpec{`, + `CIDRs:` + fmt.Sprintf("%v", this.CIDRs) + `,`, `}`, }, "") return s } -func (this *ParentReference) String() string { +func (this *ServiceCIDRStatus) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&ParentReference{`, - `Group:` + fmt.Sprintf("%v", this.Group) + `,`, - `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + repeatedStringForConditions := "[]Condition{" + for _, f := range this.Conditions { + repeatedStringForConditions += fmt.Sprintf("%v", f) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ServiceCIDRStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -841,7 +915,7 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *ClusterCIDR) Unmarshal(dAtA []byte) error { +func (m *IPAddress) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -864,10 +938,10 @@ func (m *ClusterCIDR) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ClusterCIDR: wiretype end group for non-group") + return fmt.Errorf("proto: IPAddress: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterCIDR: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IPAddress: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -957,7 +1031,7 @@ func (m *ClusterCIDR) Unmarshal(dAtA []byte) error { } return nil } -func (m *ClusterCIDRList) Unmarshal(dAtA []byte) error { +func (m *IPAddressList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -980,10 +1054,10 @@ func (m *ClusterCIDRList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ClusterCIDRList: wiretype end group for non-group") + return fmt.Errorf("proto: IPAddressList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterCIDRList: illegal tag %d (wire type %d)", fieldNum, wire) + return 
fmt.Errorf("proto: IPAddressList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -1018,10 +1092,94 @@ func (m *ClusterCIDRList) Unmarshal(dAtA []byte) error { if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - iNdEx = postIndex - case 2: + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, IPAddress{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IPAddressSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IPAddressSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IPAddressSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ParentRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1048,8 +1206,10 @@ func (m *ClusterCIDRList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, ClusterCIDR{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.ParentRef == nil { + m.ParentRef = &ParentReference{} + } + if err := m.ParentRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1074,7 +1234,7 @@ func (m *ClusterCIDRList) Unmarshal(dAtA []byte) error { } return nil } -func (m *ClusterCIDRSpec) Unmarshal(dAtA []byte) error { +func (m *ParentReference) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1097,17 +1257,17 @@ func (m *ClusterCIDRSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ClusterCIDRSpec: wiretype end group for non-group") + return fmt.Errorf("proto: ParentReference: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterCIDRSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ParentReference: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1117,33 +1277,29 @@ func (m *ClusterCIDRSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.NodeSelector == nil { - m.NodeSelector = &v11.NodeSelector{} - } - if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Group = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PerNodeHostBits", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) } - m.PerNodeHostBits = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1153,14 +1309,27 @@ func (m *ClusterCIDRSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.PerNodeHostBits |= int32(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IPv4", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1188,11 +1357,11 @@ func (m *ClusterCIDRSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.IPv4 = string(dAtA[iNdEx:postIndex]) + m.Namespace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IPv6", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1220,7 +1389,7 @@ func (m *ClusterCIDRSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.IPv6 = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -1243,7 +1412,7 @@ func (m *ClusterCIDRSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *IPAddress) Unmarshal(dAtA []byte) error { +func (m *ServiceCIDR) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1266,10 +1435,10 @@ func (m *IPAddress) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IPAddress: wiretype end group for non-group") + return fmt.Errorf("proto: ServiceCIDR: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IPAddress: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ServiceCIDR: illegal tag 
%d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -1338,6 +1507,39 @@ func (m *IPAddress) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1359,7 +1561,7 @@ func (m *IPAddress) Unmarshal(dAtA []byte) error { } return nil } -func (m *IPAddressList) Unmarshal(dAtA []byte) error { +func (m *ServiceCIDRList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1382,10 +1584,10 @@ func (m *IPAddressList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IPAddressList: wiretype end group for non-group") + return fmt.Errorf("proto: ServiceCIDRList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IPAddressList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ServiceCIDRList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -1450,7 +1652,7 @@ func (m *IPAddressList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, IPAddress{}) + m.Items = append(m.Items, ServiceCIDR{}) if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -1476,7 +1678,7 @@ func (m *IPAddressList) Unmarshal(dAtA []byte) error { } return nil } -func (m *IPAddressSpec) Unmarshal(dAtA []byte) error { +func (m *ServiceCIDRSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1499,17 +1701,17 @@ func (m *IPAddressSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IPAddressSpec: wiretype end group for non-group") + return fmt.Errorf("proto: ServiceCIDRSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IPAddressSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ServiceCIDRSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ParentRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CIDRs", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1519,27 +1721,23 @@ func (m *IPAddressSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { 
return io.ErrUnexpectedEOF } - if m.ParentRef == nil { - m.ParentRef = &ParentReference{} - } - if err := m.ParentRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.CIDRs = append(m.CIDRs, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -1562,7 +1760,7 @@ func (m *IPAddressSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *ParentReference) Unmarshal(dAtA []byte) error { +func (m *ServiceCIDRStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1585,113 +1783,17 @@ func (m *ParentReference) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ParentReference: wiretype end group for non-group") + return fmt.Errorf("proto: ServiceCIDRStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ParentReference: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ServiceCIDRStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Group = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Resource = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1701,55 +1803,25 @@ func (m *ParentReference) Unmarshal(dAtA []byte) error { 
} b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF + m.Conditions = append(m.Conditions, v1.Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex diff --git a/vendor/k8s.io/api/networking/v1alpha1/generated.proto b/vendor/k8s.io/api/networking/v1alpha1/generated.proto index 0f1f30d701..fb7971745d 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/networking/v1alpha1/generated.proto @@ -21,7 +21,6 @@ syntax = "proto2"; package k8s.io.api.networking.v1alpha1; -import "k8s.io/api/core/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -29,69 +28,6 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "k8s.io/api/networking/v1alpha1"; -// ClusterCIDR represents a single configuration for per-Node Pod CIDR -// allocations when the MultiCIDRRangeAllocator is enabled (see the config for -// kube-controller-manager). A cluster may have any number of ClusterCIDR -// resources, all of which will be considered when allocating a CIDR for a -// Node. A ClusterCIDR is eligible to be used for a given Node when the node -// selector matches the node in question and has free CIDRs to allocate. In -// case of multiple matching ClusterCIDR resources, the allocator will attempt -// to break ties using internal heuristics, but any ClusterCIDR whose node -// selector matches the Node may be used. -message ClusterCIDR { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // spec is the desired state of the ClusterCIDR. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - optional ClusterCIDRSpec spec = 2; -} - -// ClusterCIDRList contains a list of ClusterCIDR. -message ClusterCIDRList { - // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // items is the list of ClusterCIDRs. - repeated ClusterCIDR items = 2; -} - -// ClusterCIDRSpec defines the desired state of ClusterCIDR. -message ClusterCIDRSpec { - // nodeSelector defines which nodes the config is applicable to. - // An empty or nil nodeSelector selects all nodes. - // This field is immutable. - // +optional - optional k8s.io.api.core.v1.NodeSelector nodeSelector = 1; - - // perNodeHostBits defines the number of host bits to be configured per node. - // A subnet mask determines how much of the address is used for network bits - // and host bits. For example an IPv4 address of 192.168.0.0/24, splits the - // address into 24 bits for the network portion and 8 bits for the host portion. - // To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). - // Minimum value is 4 (16 IPs). - // This field is immutable. - // +required - optional int32 perNodeHostBits = 2; - - // ipv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8"). - // At least one of ipv4 and ipv6 must be specified. - // This field is immutable. - // +optional - optional string ipv4 = 3; - - // ipv6 defines an IPv6 IP block in CIDR notation(e.g. "2001:db8::/64"). - // At least one of ipv4 and ipv6 must be specified. - // This field is immutable. - // +optional - optional string ipv6 = 4; -} - // IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs // that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. // An IP address can be represented in different formats, to guarantee the uniqueness of the IP, @@ -147,9 +83,56 @@ message ParentReference { // Name is the name of the object being referenced. // +required optional string name = 4; +} + +// ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). +// This range is used to allocate ClusterIPs to Service objects. +message ServiceCIDR { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is the desired state of the ServiceCIDR. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional ServiceCIDRSpec spec = 2; + + // status represents the current state of the ServiceCIDR. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional ServiceCIDRStatus status = 3; +} + +// ServiceCIDRList contains a list of ServiceCIDR objects. +message ServiceCIDRList { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of ServiceCIDRs. + repeated ServiceCIDR items = 2; +} + +// ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services. +message ServiceCIDRSpec { + // CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64") + // from which to assign service cluster IPs. 
Max of two CIDRs is allowed, one of each IP family. + // This field is immutable. + // +optional + repeated string cidrs = 1; +} - // UID is the uid of the object being referenced. +// ServiceCIDRStatus describes the current state of the ServiceCIDR. +message ServiceCIDRStatus { + // conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. + // Current service state // +optional - optional string uid = 5; + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1; } diff --git a/vendor/k8s.io/api/networking/v1alpha1/register.go b/vendor/k8s.io/api/networking/v1alpha1/register.go index 8dda6394d4..c8f5856b5d 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/register.go +++ b/vendor/k8s.io/api/networking/v1alpha1/register.go @@ -52,10 +52,10 @@ var ( // Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &ClusterCIDR{}, - &ClusterCIDRList{}, &IPAddress{}, &IPAddressList{}, + &ServiceCIDR{}, + &ServiceCIDRList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/vendor/k8s.io/api/networking/v1alpha1/types.go b/vendor/k8s.io/api/networking/v1alpha1/types.go index 52e4a11e8b..9d56ca193e 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/types.go +++ b/vendor/k8s.io/api/networking/v1alpha1/types.go @@ -17,86 +17,9 @@ limitations under the License. package v1alpha1 import ( - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" ) -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.25 - -// ClusterCIDR represents a single configuration for per-Node Pod CIDR -// allocations when the MultiCIDRRangeAllocator is enabled (see the config for -// kube-controller-manager). A cluster may have any number of ClusterCIDR -// resources, all of which will be considered when allocating a CIDR for a -// Node. A ClusterCIDR is eligible to be used for a given Node when the node -// selector matches the node in question and has free CIDRs to allocate. In -// case of multiple matching ClusterCIDR resources, the allocator will attempt -// to break ties using internal heuristics, but any ClusterCIDR whose node -// selector matches the Node may be used. -type ClusterCIDR struct { - metav1.TypeMeta `json:",inline"` - - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // spec is the desired state of the ClusterCIDR. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - Spec ClusterCIDRSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` -} - -// ClusterCIDRSpec defines the desired state of ClusterCIDR. -type ClusterCIDRSpec struct { - // nodeSelector defines which nodes the config is applicable to. - // An empty or nil nodeSelector selects all nodes. - // This field is immutable. - // +optional - NodeSelector *v1.NodeSelector `json:"nodeSelector,omitempty" protobuf:"bytes,1,opt,name=nodeSelector"` - - // perNodeHostBits defines the number of host bits to be configured per node. 
- // A subnet mask determines how much of the address is used for network bits - // and host bits. For example an IPv4 address of 192.168.0.0/24, splits the - // address into 24 bits for the network portion and 8 bits for the host portion. - // To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). - // Minimum value is 4 (16 IPs). - // This field is immutable. - // +required - PerNodeHostBits int32 `json:"perNodeHostBits" protobuf:"varint,2,opt,name=perNodeHostBits"` - - // ipv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8"). - // At least one of ipv4 and ipv6 must be specified. - // This field is immutable. - // +optional - IPv4 string `json:"ipv4" protobuf:"bytes,3,opt,name=ipv4"` - - // ipv6 defines an IPv6 IP block in CIDR notation(e.g. "2001:db8::/64"). - // At least one of ipv4 and ipv6 must be specified. - // This field is immutable. - // +optional - IPv6 string `json:"ipv6" protobuf:"bytes,4,opt,name=ipv6"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.25 - -// ClusterCIDRList contains a list of ClusterCIDR. -type ClusterCIDRList struct { - metav1.TypeMeta `json:",inline"` - - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // items is the list of ClusterCIDRs. - Items []ClusterCIDR `json:"items" protobuf:"bytes,2,rep,name=items"` -} - // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -143,9 +66,6 @@ type ParentReference struct { // Name is the name of the object being referenced. // +required Name string `json:"name,omitempty" protobuf:"bytes,4,opt,name=name"` - // UID is the uid of the object being referenced. - // +optional - UID types.UID `json:"uid,omitempty" protobuf:"bytes,5,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -161,3 +81,70 @@ type IPAddressList struct { // items is the list of IPAddresses. Items []IPAddress `json:"items" protobuf:"bytes,2,rep,name=items"` } + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.27 + +// ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). +// This range is used to allocate ClusterIPs to Service objects. +type ServiceCIDR struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // spec is the desired state of the ServiceCIDR. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec ServiceCIDRSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // status represents the current state of the ServiceCIDR. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status ServiceCIDRStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services. +type ServiceCIDRSpec struct { + // CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64") + // from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. + // This field is immutable. + // +optional + CIDRs []string `json:"cidrs,omitempty" protobuf:"bytes,1,opt,name=cidrs"` +} + +const ( + // ServiceCIDRConditionReady represents status of a ServiceCIDR that is ready to be used by the + // apiserver to allocate ClusterIPs for Services. + ServiceCIDRConditionReady = "Ready" + // ServiceCIDRReasonTerminating represents a reason where a ServiceCIDR is not ready because it is + // being deleted. + ServiceCIDRReasonTerminating = "Terminating" +) + +// ServiceCIDRStatus describes the current state of the ServiceCIDR. +type ServiceCIDRStatus struct { + // conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. + // Current service state + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.27 + +// ServiceCIDRList contains a list of ServiceCIDR objects. +type ServiceCIDRList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // items is the list of ServiceCIDRs. + Items []ServiceCIDR `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go index 85304784f4..481ec06030 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go @@ -27,38 +27,6 @@ package v1alpha1 // Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_ClusterCIDR = map[string]string{ - "": "ClusterCIDR represents a single configuration for per-Node Pod CIDR allocations when the MultiCIDRRangeAllocator is enabled (see the config for kube-controller-manager). A cluster may have any number of ClusterCIDR resources, all of which will be considered when allocating a CIDR for a Node. A ClusterCIDR is eligible to be used for a given Node when the node selector matches the node in question and has free CIDRs to allocate. In case of multiple matching ClusterCIDR resources, the allocator will attempt to break ties using internal heuristics, but any ClusterCIDR whose node selector matches the Node may be used.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "spec is the desired state of the ClusterCIDR. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", -} - -func (ClusterCIDR) SwaggerDoc() map[string]string { - return map_ClusterCIDR -} - -var map_ClusterCIDRList = map[string]string{ - "": "ClusterCIDRList contains a list of ClusterCIDR.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "items is the list of ClusterCIDRs.", -} - -func (ClusterCIDRList) SwaggerDoc() map[string]string { - return map_ClusterCIDRList -} - -var map_ClusterCIDRSpec = map[string]string{ - "": "ClusterCIDRSpec defines the desired state of ClusterCIDR.", - "nodeSelector": "nodeSelector defines which nodes the config is applicable to. An empty or nil nodeSelector selects all nodes. This field is immutable.", - "perNodeHostBits": "perNodeHostBits defines the number of host bits to be configured per node. A subnet mask determines how much of the address is used for network bits and host bits. For example an IPv4 address of 192.168.0.0/24, splits the address into 24 bits for the network portion and 8 bits for the host portion. To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). Minimum value is 4 (16 IPs). This field is immutable.", - "ipv4": "ipv4 defines an IPv4 IP block in CIDR notation(e.g. \"10.0.0.0/8\"). At least one of ipv4 and ipv6 must be specified. This field is immutable.", - "ipv6": "ipv6 defines an IPv6 IP block in CIDR notation(e.g. \"2001:db8::/64\"). At least one of ipv4 and ipv6 must be specified. This field is immutable.", -} - -func (ClusterCIDRSpec) SwaggerDoc() map[string]string { - return map_ClusterCIDRSpec -} - var map_IPAddress = map[string]string{ "": "IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. An IP address can be represented in different formats, to guarantee the uniqueness of the IP, the name of the object is the IP address in canonical format, four decimal digits separated by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 Invalid: 10.01.2.3 or 2001:db8:0:0:0::1", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -94,11 +62,49 @@ var map_ParentReference = map[string]string{ "resource": "Resource is the resource of the object being referenced.", "namespace": "Namespace is the namespace of the object being referenced.", "name": "Name is the name of the object being referenced.", - "uid": "UID is the uid of the object being referenced.", } func (ParentReference) SwaggerDoc() map[string]string { return map_ParentReference } +var map_ServiceCIDR = map[string]string{ + "": "ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). This range is used to allocate ClusterIPs to Service objects.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the desired state of the ServiceCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "status represents the current state of the ServiceCIDR. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (ServiceCIDR) SwaggerDoc() map[string]string { + return map_ServiceCIDR +} + +var map_ServiceCIDRList = map[string]string{ + "": "ServiceCIDRList contains a list of ServiceCIDR objects.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of ServiceCIDRs.", +} + +func (ServiceCIDRList) SwaggerDoc() map[string]string { + return map_ServiceCIDRList +} + +var map_ServiceCIDRSpec = map[string]string{ + "": "ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services.", + "cidrs": "CIDRs defines the IP blocks in CIDR notation (e.g. \"192.168.0.0/24\" or \"2001:db8::/64\") from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. This field is immutable.", +} + +func (ServiceCIDRSpec) SwaggerDoc() map[string]string { + return map_ServiceCIDRSpec +} + +var map_ServiceCIDRStatus = map[string]string{ + "": "ServiceCIDRStatus describes the current state of the ServiceCIDR.", + "conditions": "conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. Current service state", +} + +func (ServiceCIDRStatus) SwaggerDoc() map[string]string { + return map_ServiceCIDRStatus +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go index 97db2eacc9..5c8f697ba3 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go @@ -22,12 +22,12 @@ limitations under the License. package v1alpha1 import ( - v1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterCIDR) DeepCopyInto(out *ClusterCIDR) { +func (in *IPAddress) DeepCopyInto(out *IPAddress) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) @@ -35,18 +35,18 @@ func (in *ClusterCIDR) DeepCopyInto(out *ClusterCIDR) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDR. -func (in *ClusterCIDR) DeepCopy() *ClusterCIDR { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddress. +func (in *IPAddress) DeepCopy() *IPAddress { if in == nil { return nil } - out := new(ClusterCIDR) + out := new(IPAddress) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterCIDR) DeepCopyObject() runtime.Object { +func (in *IPAddress) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -54,13 +54,13 @@ func (in *ClusterCIDR) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ClusterCIDRList) DeepCopyInto(out *ClusterCIDRList) { +func (in *IPAddressList) DeepCopyInto(out *IPAddressList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]ClusterCIDR, len(*in)) + *out = make([]IPAddress, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -68,18 +68,18 @@ func (in *ClusterCIDRList) DeepCopyInto(out *ClusterCIDRList) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDRList. -func (in *ClusterCIDRList) DeepCopy() *ClusterCIDRList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressList. +func (in *IPAddressList) DeepCopy() *IPAddressList { if in == nil { return nil } - out := new(ClusterCIDRList) + out := new(IPAddressList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterCIDRList) DeepCopyObject() runtime.Object { +func (in *IPAddressList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -87,47 +87,64 @@ func (in *ClusterCIDRList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterCIDRSpec) DeepCopyInto(out *ClusterCIDRSpec) { +func (in *IPAddressSpec) DeepCopyInto(out *IPAddressSpec) { *out = *in - if in.NodeSelector != nil { - in, out := &in.NodeSelector, &out.NodeSelector - *out = new(v1.NodeSelector) - (*in).DeepCopyInto(*out) + if in.ParentRef != nil { + in, out := &in.ParentRef, &out.ParentRef + *out = new(ParentReference) + **out = **in } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDRSpec. -func (in *ClusterCIDRSpec) DeepCopy() *ClusterCIDRSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressSpec. +func (in *IPAddressSpec) DeepCopy() *IPAddressSpec { if in == nil { return nil } - out := new(ClusterCIDRSpec) + out := new(IPAddressSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IPAddress) DeepCopyInto(out *IPAddress) { +func (in *ParentReference) DeepCopyInto(out *ParentReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParentReference. +func (in *ParentReference) DeepCopy() *ParentReference { + if in == nil { + return nil + } + out := new(ParentReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCIDR) DeepCopyInto(out *ServiceCIDR) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddress. -func (in *IPAddress) DeepCopy() *IPAddress { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDR. 
+func (in *ServiceCIDR) DeepCopy() *ServiceCIDR { if in == nil { return nil } - out := new(IPAddress) + out := new(ServiceCIDR) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *IPAddress) DeepCopyObject() runtime.Object { +func (in *ServiceCIDR) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -135,13 +152,13 @@ func (in *IPAddress) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IPAddressList) DeepCopyInto(out *IPAddressList) { +func (in *ServiceCIDRList) DeepCopyInto(out *ServiceCIDRList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]IPAddress, len(*in)) + *out = make([]ServiceCIDR, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -149,18 +166,18 @@ func (in *IPAddressList) DeepCopyInto(out *IPAddressList) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressList. -func (in *IPAddressList) DeepCopy() *IPAddressList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRList. +func (in *ServiceCIDRList) DeepCopy() *ServiceCIDRList { if in == nil { return nil } - out := new(IPAddressList) + out := new(ServiceCIDRList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *IPAddressList) DeepCopyObject() runtime.Object { +func (in *ServiceCIDRList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -168,38 +185,45 @@ func (in *IPAddressList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IPAddressSpec) DeepCopyInto(out *IPAddressSpec) { +func (in *ServiceCIDRSpec) DeepCopyInto(out *ServiceCIDRSpec) { *out = *in - if in.ParentRef != nil { - in, out := &in.ParentRef, &out.ParentRef - *out = new(ParentReference) - **out = **in + if in.CIDRs != nil { + in, out := &in.CIDRs, &out.CIDRs + *out = make([]string, len(*in)) + copy(*out, *in) } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressSpec. -func (in *IPAddressSpec) DeepCopy() *IPAddressSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRSpec. +func (in *ServiceCIDRSpec) DeepCopy() *ServiceCIDRSpec { if in == nil { return nil } - out := new(IPAddressSpec) + out := new(ServiceCIDRSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ParentReference) DeepCopyInto(out *ParentReference) { +func (in *ServiceCIDRStatus) DeepCopyInto(out *ServiceCIDRStatus) { *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParentReference. 
-func (in *ParentReference) DeepCopy() *ParentReference { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRStatus. +func (in *ServiceCIDRStatus) DeepCopy() *ServiceCIDRStatus { if in == nil { return nil } - out := new(ParentReference) + out := new(ServiceCIDRStatus) in.DeepCopyInto(out) return out } diff --git a/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go index 60438ba59f..714e7b6253 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -23,72 +23,72 @@ package v1alpha1 // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *ClusterCIDR) APILifecycleIntroduced() (major, minor int) { - return 1, 25 +func (in *IPAddress) APILifecycleIntroduced() (major, minor int) { + return 1, 27 } // APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *ClusterCIDR) APILifecycleDeprecated() (major, minor int) { - return 1, 28 +func (in *IPAddress) APILifecycleDeprecated() (major, minor int) { + return 1, 30 } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *ClusterCIDR) APILifecycleRemoved() (major, minor int) { - return 1, 31 +func (in *IPAddress) APILifecycleRemoved() (major, minor int) { + return 1, 33 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *ClusterCIDRList) APILifecycleIntroduced() (major, minor int) { - return 1, 25 +func (in *IPAddressList) APILifecycleIntroduced() (major, minor int) { + return 1, 27 } // APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *ClusterCIDRList) APILifecycleDeprecated() (major, minor int) { - return 1, 28 +func (in *IPAddressList) APILifecycleDeprecated() (major, minor int) { + return 1, 30 } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. 
-func (in *ClusterCIDRList) APILifecycleRemoved() (major, minor int) { - return 1, 31 +func (in *IPAddressList) APILifecycleRemoved() (major, minor int) { + return 1, 33 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *IPAddress) APILifecycleIntroduced() (major, minor int) { +func (in *ServiceCIDR) APILifecycleIntroduced() (major, minor int) { return 1, 27 } // APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *IPAddress) APILifecycleDeprecated() (major, minor int) { +func (in *ServiceCIDR) APILifecycleDeprecated() (major, minor int) { return 1, 30 } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *IPAddress) APILifecycleRemoved() (major, minor int) { +func (in *ServiceCIDR) APILifecycleRemoved() (major, minor int) { return 1, 33 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *IPAddressList) APILifecycleIntroduced() (major, minor int) { +func (in *ServiceCIDRList) APILifecycleIntroduced() (major, minor int) { return 1, 27 } // APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *IPAddressList) APILifecycleDeprecated() (major, minor int) { +func (in *ServiceCIDRList) APILifecycleDeprecated() (major, minor int) { return 1, 30 } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *IPAddressList) APILifecycleRemoved() (major, minor int) { +func (in *ServiceCIDRList) APILifecycleRemoved() (major, minor int) { return 1, 33 } diff --git a/vendor/k8s.io/api/policy/v1/doc.go b/vendor/k8s.io/api/policy/v1/doc.go index b46af58e43..177cdf5236 100644 --- a/vendor/k8s.io/api/policy/v1/doc.go +++ b/vendor/k8s.io/api/policy/v1/doc.go @@ -19,6 +19,6 @@ limitations under the License. // +k8s:openapi-gen=true // Package policy is for any kind of policy object. Suitable examples, even if -// they aren't all here, are PodDisruptionBudget, PodSecurityPolicy, +// they aren't all here, are PodDisruptionBudget, // NetworkPolicy, etc. 
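For orientation only, and not part of the vendored sources: after the PodSecurityPolicy types are dropped from this package group, PodDisruptionBudget remains the representative object named in the package comment above. The following is a minimal sketch of constructing one from the k8s.io/api/policy/v1 types; field names are assumed to follow that API, and the example is illustrative rather than a definitive usage pattern.

package main

import (
	"fmt"

	policyv1 "k8s.io/api/policy/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// Require at least two ready pods matching the selector during voluntary disruptions.
	minAvailable := intstr.FromInt(2)

	pdb := policyv1.PodDisruptionBudget{
		ObjectMeta: metav1.ObjectMeta{Name: "example-pdb", Namespace: "default"},
		Spec: policyv1.PodDisruptionBudgetSpec{
			MinAvailable: &minAvailable,
			Selector:     &metav1.LabelSelector{MatchLabels: map[string]string{"app": "example"}},
		},
	}

	// Print the budget the way a controller might log it before submitting it to the API server.
	fmt.Printf("%s/%s: minAvailable=%s\n", pdb.Namespace, pdb.Name, pdb.Spec.MinAvailable.String())
}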
package v1 // import "k8s.io/api/policy/v1" diff --git a/vendor/k8s.io/api/policy/v1beta1/doc.go b/vendor/k8s.io/api/policy/v1beta1/doc.go index 9e9c7d13ab..76da54b4c7 100644 --- a/vendor/k8s.io/api/policy/v1beta1/doc.go +++ b/vendor/k8s.io/api/policy/v1beta1/doc.go @@ -20,6 +20,6 @@ limitations under the License. // +k8s:prerelease-lifecycle-gen=true // Package policy is for any kind of policy object. Suitable examples, even if -// they aren't all here, are PodDisruptionBudget, PodSecurityPolicy, +// they aren't all here, are PodDisruptionBudget, // NetworkPolicy, etc. package v1beta1 // import "k8s.io/api/policy/v1beta1" diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.pb.go b/vendor/k8s.io/api/policy/v1beta1/generated.pb.go index 0b75d64154..efba41b3fd 100644 --- a/vendor/k8s.io/api/policy/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/policy/v1beta1/generated.pb.go @@ -26,8 +26,6 @@ import ( proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - k8s_io_api_core_v1 "k8s.io/api/core/v1" - v11 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" math "math" @@ -49,94 +47,10 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -func (m *AllowedCSIDriver) Reset() { *m = AllowedCSIDriver{} } -func (*AllowedCSIDriver) ProtoMessage() {} -func (*AllowedCSIDriver) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{0} -} -func (m *AllowedCSIDriver) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AllowedCSIDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AllowedCSIDriver) XXX_Merge(src proto.Message) { - xxx_messageInfo_AllowedCSIDriver.Merge(m, src) -} -func (m *AllowedCSIDriver) XXX_Size() int { - return m.Size() -} -func (m *AllowedCSIDriver) XXX_DiscardUnknown() { - xxx_messageInfo_AllowedCSIDriver.DiscardUnknown(m) -} - -var xxx_messageInfo_AllowedCSIDriver proto.InternalMessageInfo - -func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } -func (*AllowedFlexVolume) ProtoMessage() {} -func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{1} -} -func (m *AllowedFlexVolume) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AllowedFlexVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AllowedFlexVolume) XXX_Merge(src proto.Message) { - xxx_messageInfo_AllowedFlexVolume.Merge(m, src) -} -func (m *AllowedFlexVolume) XXX_Size() int { - return m.Size() -} -func (m *AllowedFlexVolume) XXX_DiscardUnknown() { - xxx_messageInfo_AllowedFlexVolume.DiscardUnknown(m) -} - -var xxx_messageInfo_AllowedFlexVolume proto.InternalMessageInfo - -func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } -func (*AllowedHostPath) ProtoMessage() {} -func (*AllowedHostPath) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{2} -} -func (m *AllowedHostPath) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AllowedHostPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func 
(m *AllowedHostPath) XXX_Merge(src proto.Message) { - xxx_messageInfo_AllowedHostPath.Merge(m, src) -} -func (m *AllowedHostPath) XXX_Size() int { - return m.Size() -} -func (m *AllowedHostPath) XXX_DiscardUnknown() { - xxx_messageInfo_AllowedHostPath.DiscardUnknown(m) -} - -var xxx_messageInfo_AllowedHostPath proto.InternalMessageInfo - func (m *Eviction) Reset() { *m = Eviction{} } func (*Eviction) ProtoMessage() {} func (*Eviction) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{3} + return fileDescriptor_014060e454a820dc, []int{0} } func (m *Eviction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -161,94 +75,10 @@ func (m *Eviction) XXX_DiscardUnknown() { var xxx_messageInfo_Eviction proto.InternalMessageInfo -func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } -func (*FSGroupStrategyOptions) ProtoMessage() {} -func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{4} -} -func (m *FSGroupStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *FSGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *FSGroupStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_FSGroupStrategyOptions.Merge(m, src) -} -func (m *FSGroupStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *FSGroupStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_FSGroupStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_FSGroupStrategyOptions proto.InternalMessageInfo - -func (m *HostPortRange) Reset() { *m = HostPortRange{} } -func (*HostPortRange) ProtoMessage() {} -func (*HostPortRange) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{5} -} -func (m *HostPortRange) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HostPortRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HostPortRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_HostPortRange.Merge(m, src) -} -func (m *HostPortRange) XXX_Size() int { - return m.Size() -} -func (m *HostPortRange) XXX_DiscardUnknown() { - xxx_messageInfo_HostPortRange.DiscardUnknown(m) -} - -var xxx_messageInfo_HostPortRange proto.InternalMessageInfo - -func (m *IDRange) Reset() { *m = IDRange{} } -func (*IDRange) ProtoMessage() {} -func (*IDRange) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{6} -} -func (m *IDRange) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IDRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *IDRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_IDRange.Merge(m, src) -} -func (m *IDRange) XXX_Size() int { - return m.Size() -} -func (m *IDRange) XXX_DiscardUnknown() { - xxx_messageInfo_IDRange.DiscardUnknown(m) -} - -var xxx_messageInfo_IDRange proto.InternalMessageInfo - func (m *PodDisruptionBudget) Reset() { *m = PodDisruptionBudget{} } func (*PodDisruptionBudget) ProtoMessage() {} func (*PodDisruptionBudget) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{7} + return fileDescriptor_014060e454a820dc, 
[]int{1} } func (m *PodDisruptionBudget) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -276,7 +106,7 @@ var xxx_messageInfo_PodDisruptionBudget proto.InternalMessageInfo func (m *PodDisruptionBudgetList) Reset() { *m = PodDisruptionBudgetList{} } func (*PodDisruptionBudgetList) ProtoMessage() {} func (*PodDisruptionBudgetList) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{8} + return fileDescriptor_014060e454a820dc, []int{2} } func (m *PodDisruptionBudgetList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -304,7 +134,7 @@ var xxx_messageInfo_PodDisruptionBudgetList proto.InternalMessageInfo func (m *PodDisruptionBudgetSpec) Reset() { *m = PodDisruptionBudgetSpec{} } func (*PodDisruptionBudgetSpec) ProtoMessage() {} func (*PodDisruptionBudgetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{9} + return fileDescriptor_014060e454a820dc, []int{3} } func (m *PodDisruptionBudgetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -332,7 +162,7 @@ var xxx_messageInfo_PodDisruptionBudgetSpec proto.InternalMessageInfo func (m *PodDisruptionBudgetStatus) Reset() { *m = PodDisruptionBudgetStatus{} } func (*PodDisruptionBudgetStatus) ProtoMessage() {} func (*PodDisruptionBudgetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{10} + return fileDescriptor_014060e454a820dc, []int{4} } func (m *PodDisruptionBudgetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -357,251 +187,13 @@ func (m *PodDisruptionBudgetStatus) XXX_DiscardUnknown() { var xxx_messageInfo_PodDisruptionBudgetStatus proto.InternalMessageInfo -func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } -func (*PodSecurityPolicy) ProtoMessage() {} -func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{11} -} -func (m *PodSecurityPolicy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSecurityPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSecurityPolicy) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSecurityPolicy.Merge(m, src) -} -func (m *PodSecurityPolicy) XXX_Size() int { - return m.Size() -} -func (m *PodSecurityPolicy) XXX_DiscardUnknown() { - xxx_messageInfo_PodSecurityPolicy.DiscardUnknown(m) -} - -var xxx_messageInfo_PodSecurityPolicy proto.InternalMessageInfo - -func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } -func (*PodSecurityPolicyList) ProtoMessage() {} -func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{12} -} -func (m *PodSecurityPolicyList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSecurityPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSecurityPolicyList) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSecurityPolicyList.Merge(m, src) -} -func (m *PodSecurityPolicyList) XXX_Size() int { - return m.Size() -} -func (m *PodSecurityPolicyList) XXX_DiscardUnknown() { - xxx_messageInfo_PodSecurityPolicyList.DiscardUnknown(m) -} - -var xxx_messageInfo_PodSecurityPolicyList proto.InternalMessageInfo - -func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } -func 
(*PodSecurityPolicySpec) ProtoMessage() {} -func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{13} -} -func (m *PodSecurityPolicySpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSecurityPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSecurityPolicySpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSecurityPolicySpec.Merge(m, src) -} -func (m *PodSecurityPolicySpec) XXX_Size() int { - return m.Size() -} -func (m *PodSecurityPolicySpec) XXX_DiscardUnknown() { - xxx_messageInfo_PodSecurityPolicySpec.DiscardUnknown(m) -} - -var xxx_messageInfo_PodSecurityPolicySpec proto.InternalMessageInfo - -func (m *RunAsGroupStrategyOptions) Reset() { *m = RunAsGroupStrategyOptions{} } -func (*RunAsGroupStrategyOptions) ProtoMessage() {} -func (*RunAsGroupStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{14} -} -func (m *RunAsGroupStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RunAsGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RunAsGroupStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_RunAsGroupStrategyOptions.Merge(m, src) -} -func (m *RunAsGroupStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *RunAsGroupStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_RunAsGroupStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_RunAsGroupStrategyOptions proto.InternalMessageInfo - -func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } -func (*RunAsUserStrategyOptions) ProtoMessage() {} -func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{15} -} -func (m *RunAsUserStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RunAsUserStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RunAsUserStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_RunAsUserStrategyOptions.Merge(m, src) -} -func (m *RunAsUserStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *RunAsUserStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_RunAsUserStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_RunAsUserStrategyOptions proto.InternalMessageInfo - -func (m *RuntimeClassStrategyOptions) Reset() { *m = RuntimeClassStrategyOptions{} } -func (*RuntimeClassStrategyOptions) ProtoMessage() {} -func (*RuntimeClassStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{16} -} -func (m *RuntimeClassStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RuntimeClassStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RuntimeClassStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_RuntimeClassStrategyOptions.Merge(m, src) -} -func (m *RuntimeClassStrategyOptions) XXX_Size() int { - 
return m.Size() -} -func (m *RuntimeClassStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_RuntimeClassStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_RuntimeClassStrategyOptions proto.InternalMessageInfo - -func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } -func (*SELinuxStrategyOptions) ProtoMessage() {} -func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{17} -} -func (m *SELinuxStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SELinuxStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SELinuxStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_SELinuxStrategyOptions.Merge(m, src) -} -func (m *SELinuxStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *SELinuxStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_SELinuxStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_SELinuxStrategyOptions proto.InternalMessageInfo - -func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } -func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} -func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{18} -} -func (m *SupplementalGroupsStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SupplementalGroupsStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SupplementalGroupsStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_SupplementalGroupsStrategyOptions.Merge(m, src) -} -func (m *SupplementalGroupsStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *SupplementalGroupsStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_SupplementalGroupsStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_SupplementalGroupsStrategyOptions proto.InternalMessageInfo - func init() { - proto.RegisterType((*AllowedCSIDriver)(nil), "k8s.io.api.policy.v1beta1.AllowedCSIDriver") - proto.RegisterType((*AllowedFlexVolume)(nil), "k8s.io.api.policy.v1beta1.AllowedFlexVolume") - proto.RegisterType((*AllowedHostPath)(nil), "k8s.io.api.policy.v1beta1.AllowedHostPath") proto.RegisterType((*Eviction)(nil), "k8s.io.api.policy.v1beta1.Eviction") - proto.RegisterType((*FSGroupStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.FSGroupStrategyOptions") - proto.RegisterType((*HostPortRange)(nil), "k8s.io.api.policy.v1beta1.HostPortRange") - proto.RegisterType((*IDRange)(nil), "k8s.io.api.policy.v1beta1.IDRange") proto.RegisterType((*PodDisruptionBudget)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudget") proto.RegisterType((*PodDisruptionBudgetList)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetList") proto.RegisterType((*PodDisruptionBudgetSpec)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetSpec") proto.RegisterType((*PodDisruptionBudgetStatus)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetStatus") proto.RegisterMapType((map[string]v1.Time)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetStatus.DisruptedPodsEntry") - proto.RegisterType((*PodSecurityPolicy)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicy") - proto.RegisterType((*PodSecurityPolicyList)(nil), 
"k8s.io.api.policy.v1beta1.PodSecurityPolicyList") - proto.RegisterType((*PodSecurityPolicySpec)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicySpec") - proto.RegisterType((*RunAsGroupStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.RunAsGroupStrategyOptions") - proto.RegisterType((*RunAsUserStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.RunAsUserStrategyOptions") - proto.RegisterType((*RuntimeClassStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.RuntimeClassStrategyOptions") - proto.RegisterType((*SELinuxStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.SELinuxStrategyOptions") - proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.SupplementalGroupsStrategyOptions") } func init() { @@ -609,132 +201,64 @@ func init() { } var fileDescriptor_014060e454a820dc = []byte{ - // 1946 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x59, 0x5b, 0x73, 0xdb, 0xc6, - 0x15, 0x16, 0x4c, 0x5d, 0xa8, 0xd5, 0xc5, 0xe2, 0xea, 0x62, 0x48, 0x69, 0x08, 0x07, 0x99, 0xe9, - 0xb8, 0x69, 0x0a, 0xc6, 0xb2, 0xe3, 0x7a, 0x9a, 0x5e, 0x2c, 0x88, 0x92, 0xad, 0x8c, 0x65, 0xb1, - 0x4b, 0x2b, 0xd3, 0x76, 0xdc, 0x4e, 0x97, 0xc0, 0x8a, 0x44, 0x04, 0x02, 0x28, 0x76, 0xc1, 0x88, - 0x6f, 0x79, 0xe8, 0x43, 0x1f, 0xfb, 0x07, 0x32, 0xfd, 0x01, 0x9d, 0x3e, 0xf5, 0x47, 0xd4, 0x99, - 0xe9, 0x74, 0xd2, 0xb7, 0x4c, 0x1f, 0x38, 0x35, 0xfb, 0x2f, 0xfc, 0xd4, 0xc1, 0x72, 0x01, 0x12, - 0x37, 0xd2, 0xce, 0x8c, 0xfd, 0x46, 0xec, 0xf9, 0xbe, 0xef, 0xec, 0x9e, 0xdd, 0x3d, 0x67, 0x77, - 0x09, 0xf4, 0xcb, 0xfb, 0x54, 0xb3, 0xdc, 0xda, 0x65, 0xd0, 0x22, 0xbe, 0x43, 0x18, 0xa1, 0xb5, - 0x1e, 0x71, 0x4c, 0xd7, 0xaf, 0x09, 0x03, 0xf6, 0xac, 0x9a, 0xe7, 0xda, 0x96, 0xd1, 0xaf, 0xf5, - 0x6e, 0xb7, 0x08, 0xc3, 0xb7, 0x6b, 0x6d, 0xe2, 0x10, 0x1f, 0x33, 0x62, 0x6a, 0x9e, 0xef, 0x32, - 0x17, 0xee, 0x8e, 0xa0, 0x1a, 0xf6, 0x2c, 0x6d, 0x04, 0xd5, 0x04, 0x74, 0xef, 0x47, 0x6d, 0x8b, - 0x75, 0x82, 0x96, 0x66, 0xb8, 0xdd, 0x5a, 0xdb, 0x6d, 0xbb, 0x35, 0xce, 0x68, 0x05, 0x17, 0xfc, - 0x8b, 0x7f, 0xf0, 0x5f, 0x23, 0xa5, 0x3d, 0x75, 0xc2, 0xa9, 0xe1, 0xfa, 0xa4, 0xd6, 0xcb, 0x78, - 0xdb, 0xbb, 0x3b, 0xc6, 0x74, 0xb1, 0xd1, 0xb1, 0x1c, 0xe2, 0xf7, 0x6b, 0xde, 0x65, 0x3b, 0x6c, - 0xa0, 0xb5, 0x2e, 0x61, 0x38, 0x8f, 0x55, 0x2b, 0x62, 0xf9, 0x81, 0xc3, 0xac, 0x2e, 0xc9, 0x10, - 0xee, 0xcd, 0x22, 0x50, 0xa3, 0x43, 0xba, 0x38, 0xc3, 0xbb, 0x53, 0xc4, 0x0b, 0x98, 0x65, 0xd7, - 0x2c, 0x87, 0x51, 0xe6, 0xa7, 0x49, 0xea, 0x5d, 0xb0, 0x71, 0x60, 0xdb, 0xee, 0x17, 0xc4, 0x3c, - 0x6c, 0x9e, 0xd4, 0x7d, 0xab, 0x47, 0x7c, 0x78, 0x13, 0xcc, 0x3b, 0xb8, 0x4b, 0x64, 0xe9, 0xa6, - 0x74, 0x6b, 0x59, 0x5f, 0x7d, 0x3e, 0x50, 0xe6, 0x86, 0x03, 0x65, 0xfe, 0x09, 0xee, 0x12, 0xc4, - 0x2d, 0xea, 0x27, 0xa0, 0x22, 0x58, 0xc7, 0x36, 0xb9, 0xfa, 0xcc, 0xb5, 0x83, 0x2e, 0x81, 0xdf, - 0x07, 0x8b, 0x26, 0x17, 0x10, 0xc4, 0x75, 0x41, 0x5c, 0x1c, 0xc9, 0x22, 0x61, 0x55, 0x29, 0xb8, - 0x2e, 0xc8, 0x8f, 0x5c, 0xca, 0x1a, 0x98, 0x75, 0xe0, 0x3e, 0x00, 0x1e, 0x66, 0x9d, 0x86, 0x4f, - 0x2e, 0xac, 0x2b, 0x41, 0x87, 0x82, 0x0e, 0x1a, 0xb1, 0x05, 0x4d, 0xa0, 0xe0, 0x87, 0xa0, 0xec, - 0x13, 0x6c, 0x9e, 0x39, 0x76, 0x5f, 0xbe, 0x76, 0x53, 0xba, 0x55, 0xd6, 0x37, 0x04, 0xa3, 0x8c, - 0x44, 0x3b, 0x8a, 0x11, 0xea, 0x7f, 0x24, 0x50, 0x3e, 0xea, 0x59, 0x06, 0xb3, 0x5c, 0x07, 0xfe, - 0x1e, 0x94, 0xc3, 0xd9, 0x32, 0x31, 0xc3, 0xdc, 0xd9, 0xca, 0xfe, 0x47, 0xda, 0x78, 0x25, 0xc5, - 0xc1, 0xd3, 0xbc, 0xcb, 0x76, 0xd8, 0x40, 0xb5, 0x10, 0xad, 0xf5, 0x6e, 0x6b, 0x67, 0xad, 0xcf, - 0x89, 0xc1, 0x4e, 0x09, 0xc3, 0xe3, 
0xee, 0x8d, 0xdb, 0x50, 0xac, 0x0a, 0x6d, 0xb0, 0x66, 0x12, - 0x9b, 0x30, 0x72, 0xe6, 0x85, 0x1e, 0x29, 0xef, 0xe1, 0xca, 0xfe, 0x9d, 0x57, 0x73, 0x53, 0x9f, - 0xa4, 0xea, 0x95, 0xe1, 0x40, 0x59, 0x4b, 0x34, 0xa1, 0xa4, 0xb8, 0xfa, 0x95, 0x04, 0x76, 0x8e, - 0x9b, 0x0f, 0x7d, 0x37, 0xf0, 0x9a, 0x2c, 0x9c, 0xdd, 0x76, 0x5f, 0x98, 0xe0, 0x8f, 0xc1, 0xbc, - 0x1f, 0xd8, 0xd1, 0x5c, 0xbe, 0x1f, 0xcd, 0x25, 0x0a, 0x6c, 0xf2, 0x72, 0xa0, 0x6c, 0xa6, 0x58, - 0x4f, 0xfb, 0x1e, 0x41, 0x9c, 0x00, 0x3f, 0x05, 0x8b, 0x3e, 0x76, 0xda, 0x24, 0xec, 0x7a, 0xe9, - 0xd6, 0xca, 0xbe, 0xaa, 0x15, 0xee, 0x35, 0xed, 0xa4, 0x8e, 0x42, 0xe8, 0x78, 0xc6, 0xf9, 0x27, - 0x45, 0x42, 0x41, 0x3d, 0x05, 0x6b, 0x7c, 0xaa, 0x5d, 0x9f, 0x71, 0x0b, 0x7c, 0x17, 0x94, 0xba, - 0x96, 0xc3, 0x3b, 0xb5, 0xa0, 0xaf, 0x08, 0x56, 0xe9, 0xd4, 0x72, 0x50, 0xd8, 0xce, 0xcd, 0xf8, - 0x8a, 0xc7, 0x6c, 0xd2, 0x8c, 0xaf, 0x50, 0xd8, 0xae, 0x3e, 0x04, 0x4b, 0xc2, 0xe3, 0xa4, 0x50, - 0x69, 0xba, 0x50, 0x29, 0x47, 0xe8, 0xaf, 0xd7, 0xc0, 0x66, 0xc3, 0x35, 0xeb, 0x16, 0xf5, 0x03, - 0x1e, 0x2f, 0x3d, 0x30, 0xdb, 0x84, 0xbd, 0x85, 0xf5, 0xf1, 0x14, 0xcc, 0x53, 0x8f, 0x18, 0x62, - 0x59, 0xec, 0x4f, 0x89, 0x6d, 0x4e, 0xff, 0x9a, 0x1e, 0x31, 0xc6, 0xdb, 0x32, 0xfc, 0x42, 0x5c, - 0x0d, 0x3e, 0x03, 0x8b, 0x94, 0x61, 0x16, 0x50, 0xb9, 0xc4, 0x75, 0xef, 0xbe, 0xa6, 0x2e, 0xe7, - 0x8e, 0x67, 0x71, 0xf4, 0x8d, 0x84, 0xa6, 0xfa, 0x4f, 0x09, 0xdc, 0xc8, 0x61, 0x3d, 0xb6, 0x28, - 0x83, 0xcf, 0x32, 0x11, 0xd3, 0x5e, 0x2d, 0x62, 0x21, 0x9b, 0xc7, 0x2b, 0xde, 0xbc, 0x51, 0xcb, - 0x44, 0xb4, 0x9a, 0x60, 0xc1, 0x62, 0xa4, 0x1b, 0x2d, 0x45, 0xed, 0xf5, 0x86, 0xa5, 0xaf, 0x09, - 0xe9, 0x85, 0x93, 0x50, 0x04, 0x8d, 0xb4, 0xd4, 0x7f, 0x97, 0x72, 0x87, 0x13, 0x86, 0x13, 0x5e, - 0x80, 0xd5, 0xae, 0xe5, 0x1c, 0xf4, 0xb0, 0x65, 0xe3, 0x96, 0xd8, 0x3d, 0xd3, 0x16, 0x41, 0x98, - 0x61, 0xb5, 0x51, 0x86, 0xd5, 0x4e, 0x1c, 0x76, 0xe6, 0x37, 0x99, 0x6f, 0x39, 0x6d, 0x7d, 0x63, - 0x38, 0x50, 0x56, 0x4f, 0x27, 0x94, 0x50, 0x42, 0x17, 0xfe, 0x16, 0x94, 0x29, 0xb1, 0x89, 0xc1, - 0x5c, 0xff, 0xf5, 0x32, 0xc4, 0x63, 0xdc, 0x22, 0x76, 0x53, 0x50, 0xf5, 0xd5, 0x30, 0x6e, 0xd1, - 0x17, 0x8a, 0x25, 0xa1, 0x0d, 0xd6, 0xbb, 0xf8, 0xea, 0xdc, 0xc1, 0xf1, 0x40, 0x4a, 0xdf, 0x71, - 0x20, 0x70, 0x38, 0x50, 0xd6, 0x4f, 0x13, 0x5a, 0x28, 0xa5, 0x0d, 0xbf, 0x94, 0xc0, 0x5e, 0xe0, - 0x74, 0x08, 0xb6, 0x59, 0xa7, 0xdf, 0x70, 0xcd, 0x28, 0xdd, 0x36, 0xf8, 0x0c, 0xc9, 0xf3, 0x3c, - 0x03, 0x3d, 0x18, 0x0e, 0x94, 0xbd, 0xf3, 0x42, 0xd4, 0xcb, 0x81, 0x52, 0x2d, 0xb6, 0xf2, 0xf4, - 0x34, 0xc5, 0x87, 0xfa, 0x8f, 0x05, 0xb0, 0x5b, 0xb8, 0xb0, 0xe1, 0xa7, 0x00, 0xba, 0x2d, 0x4a, - 0xfc, 0x1e, 0x31, 0x1f, 0x8e, 0xca, 0xa0, 0xe5, 0x46, 0xb9, 0x63, 0x4f, 0xac, 0x11, 0x78, 0x96, - 0x41, 0xa0, 0x1c, 0x16, 0xfc, 0xa3, 0x04, 0xd6, 0xcc, 0x91, 0x1b, 0x62, 0x36, 0x5c, 0x33, 0x5a, - 0x9b, 0x0f, 0xbf, 0xcb, 0x96, 0xd3, 0xea, 0x93, 0x4a, 0x47, 0x0e, 0xf3, 0xfb, 0xfa, 0xb6, 0xe8, - 0xd0, 0x5a, 0xc2, 0x86, 0x92, 0x4e, 0xc3, 0x21, 0x99, 0xb1, 0x24, 0x15, 0x65, 0x95, 0xcf, 0xf2, - 0xc2, 0x78, 0x48, 0xf5, 0x0c, 0x02, 0xe5, 0xb0, 0xe0, 0xcf, 0xc1, 0xba, 0x11, 0xf8, 0x3e, 0x71, - 0xd8, 0xa3, 0x51, 0x7c, 0xf9, 0x94, 0x2d, 0xe8, 0x3b, 0x42, 0x67, 0xfd, 0x30, 0x61, 0x45, 0x29, - 0x74, 0xc8, 0x37, 0x09, 0xb5, 0x7c, 0x62, 0x46, 0xfc, 0x85, 0x24, 0xbf, 0x9e, 0xb0, 0xa2, 0x14, - 0x1a, 0xde, 0x07, 0xab, 0xe4, 0xca, 0x23, 0x46, 0x14, 0xd0, 0x45, 0xce, 0xde, 0x12, 0xec, 0xd5, - 0xa3, 0x09, 0x1b, 0x4a, 0x20, 0xa1, 0x01, 0x80, 0xe1, 0x3a, 0xa6, 0x35, 0x2a, 0xb5, 0x4b, 0x7c, - 0x22, 0x6a, 0xaf, 0xb6, 0x91, 0x0e, 0x23, 0xde, 0x38, 0x61, 
0xc7, 0x4d, 0x14, 0x4d, 0xc8, 0xee, - 0xd9, 0x00, 0x66, 0xa7, 0x09, 0x6e, 0x80, 0xd2, 0x25, 0xe9, 0x8f, 0xca, 0x2b, 0x0a, 0x7f, 0xc2, - 0x07, 0x60, 0xa1, 0x87, 0xed, 0x80, 0x88, 0x0d, 0xfd, 0xc1, 0xab, 0xf5, 0xe3, 0xa9, 0xd5, 0x25, - 0x68, 0x44, 0xfc, 0xc9, 0xb5, 0xfb, 0x92, 0xfa, 0xb5, 0x04, 0x2a, 0x0d, 0xd7, 0x6c, 0x12, 0x23, - 0xf0, 0x2d, 0xd6, 0x1f, 0xad, 0xef, 0xb7, 0x50, 0x98, 0x50, 0xa2, 0x30, 0x7d, 0x34, 0x7d, 0x35, - 0x27, 0x7b, 0x57, 0x54, 0x96, 0xd4, 0xe7, 0x12, 0xd8, 0xce, 0xa0, 0xdf, 0x42, 0xd9, 0xf8, 0x65, - 0xb2, 0x6c, 0x7c, 0xf8, 0x3a, 0x83, 0x29, 0x28, 0x1a, 0x5f, 0x57, 0x72, 0x86, 0xc2, 0x4b, 0x46, - 0x78, 0x84, 0xf5, 0xad, 0x9e, 0x65, 0x93, 0x36, 0x31, 0xf9, 0x60, 0xca, 0x13, 0x47, 0xd8, 0xd8, - 0x82, 0x26, 0x50, 0x90, 0x82, 0x1d, 0x93, 0x5c, 0xe0, 0xc0, 0x66, 0x07, 0xa6, 0x79, 0x88, 0x3d, - 0xdc, 0xb2, 0x6c, 0x8b, 0x59, 0xe2, 0xcc, 0xb5, 0xac, 0x7f, 0x32, 0x1c, 0x28, 0x3b, 0xf5, 0x5c, - 0xc4, 0xcb, 0x81, 0xf2, 0x6e, 0xf6, 0xca, 0xa2, 0xc5, 0x90, 0x3e, 0x2a, 0x90, 0x86, 0x7d, 0x20, - 0xfb, 0xe4, 0x0f, 0x41, 0xb8, 0xf3, 0xea, 0xbe, 0xeb, 0x25, 0xdc, 0x96, 0xb8, 0xdb, 0x9f, 0x0d, - 0x07, 0x8a, 0x8c, 0x0a, 0x30, 0xb3, 0x1d, 0x17, 0xca, 0xc3, 0xcf, 0xc1, 0x26, 0x16, 0x97, 0x8d, - 0x49, 0xaf, 0xf3, 0xdc, 0xeb, 0xfd, 0xe1, 0x40, 0xd9, 0x3c, 0xc8, 0x9a, 0x67, 0x3b, 0xcc, 0x13, - 0x85, 0x35, 0xb0, 0xd4, 0xe3, 0xf7, 0x12, 0x2a, 0x2f, 0x70, 0xfd, 0xed, 0xe1, 0x40, 0x59, 0x1a, - 0x5d, 0x55, 0x42, 0xcd, 0xc5, 0xe3, 0x26, 0x2f, 0x27, 0x11, 0x0a, 0x7e, 0x0c, 0x56, 0x3a, 0x2e, - 0x65, 0x4f, 0x08, 0xfb, 0xc2, 0xf5, 0x2f, 0x79, 0xf6, 0x29, 0xeb, 0x9b, 0x62, 0x06, 0x57, 0x1e, - 0x8d, 0x4d, 0x68, 0x12, 0x07, 0x7f, 0x0d, 0x96, 0x3b, 0xe2, 0x6c, 0x1b, 0xa5, 0x9e, 0x5b, 0x53, - 0x16, 0x5a, 0xe2, 0x1c, 0xac, 0x57, 0x84, 0xfc, 0x72, 0xd4, 0x4c, 0xd1, 0x58, 0x0d, 0xfe, 0x00, - 0x2c, 0xf1, 0x8f, 0x93, 0xba, 0x5c, 0xe6, 0xbd, 0xb9, 0x2e, 0xe0, 0x4b, 0x8f, 0x46, 0xcd, 0x28, - 0xb2, 0x47, 0xd0, 0x93, 0xc6, 0xa1, 0xbc, 0x9c, 0x85, 0x9e, 0x34, 0x0e, 0x51, 0x64, 0x87, 0xcf, - 0xc0, 0x12, 0x25, 0x8f, 0x2d, 0x27, 0xb8, 0x92, 0x01, 0xdf, 0x72, 0xb7, 0xa7, 0x74, 0xb7, 0x79, - 0xc4, 0x91, 0xa9, 0x5b, 0xc5, 0x58, 0x5d, 0xd8, 0x51, 0x24, 0x09, 0x4d, 0xb0, 0xec, 0x07, 0xce, - 0x01, 0x3d, 0xa7, 0xc4, 0x97, 0x57, 0x32, 0x47, 0x9a, 0xb4, 0x3e, 0x8a, 0xb0, 0x69, 0x0f, 0x71, - 0x64, 0x62, 0x04, 0x1a, 0x0b, 0x43, 0x13, 0x00, 0xfe, 0xc1, 0x2f, 0x2f, 0xf2, 0xce, 0xcc, 0xc3, - 0x2e, 0x8a, 0xc1, 0x69, 0x3f, 0xeb, 0xe1, 0xf6, 0x1c, 0x9b, 0xd1, 0x84, 0x2e, 0xfc, 0x93, 0x04, - 0x20, 0x0d, 0x3c, 0xcf, 0x26, 0x5d, 0xe2, 0x30, 0x6c, 0xf3, 0x56, 0x2a, 0xaf, 0x72, 0x77, 0x3f, - 0x9d, 0x16, 0xb5, 0x0c, 0x29, 0xed, 0x36, 0xae, 0xcd, 0x59, 0x28, 0xca, 0xf1, 0x19, 0x4e, 0xda, - 0x85, 0x18, 0xed, 0xda, 0xcc, 0x49, 0xcb, 0xbf, 0x0a, 0x8e, 0x27, 0x4d, 0xd8, 0x51, 0x24, 0x09, - 0x3f, 0x03, 0x3b, 0xd1, 0x45, 0x19, 0xb9, 0x2e, 0x3b, 0xb6, 0x6c, 0x42, 0xfb, 0x94, 0x91, 0xae, - 0xbc, 0xce, 0x17, 0x53, 0x55, 0x30, 0x77, 0x50, 0x2e, 0x0a, 0x15, 0xb0, 0x61, 0x17, 0x28, 0x51, - 0x12, 0x0a, 0x77, 0x68, 0x9c, 0x05, 0x8f, 0xa8, 0x81, 0xed, 0xd1, 0xe9, 0xeb, 0x3a, 0x77, 0xf0, - 0xfe, 0x70, 0xa0, 0x28, 0xf5, 0xe9, 0x50, 0x34, 0x4b, 0x0b, 0xfe, 0x0a, 0xc8, 0xb8, 0xc8, 0xcf, - 0x06, 0xf7, 0xf3, 0xbd, 0x30, 0xb3, 0x15, 0x3a, 0x28, 0x64, 0x43, 0x0f, 0x6c, 0xe0, 0xe4, 0x93, - 0x05, 0x95, 0x2b, 0x7c, 0xaf, 0x7f, 0x30, 0x65, 0x1e, 0x52, 0xaf, 0x1c, 0xba, 0x2c, 0xc2, 0xb8, - 0x91, 0x32, 0x50, 0x94, 0x51, 0x87, 0x57, 0x00, 0xe2, 0xf4, 0x0b, 0x0b, 0x95, 0xe1, 0xcc, 0x42, - 0x96, 0x79, 0x96, 0x19, 0x2f, 0xb5, 0x8c, 0x89, 0xa2, 0x1c, 0x1f, 0x90, 0x81, 0x0a, 
0x4e, 0xbd, - 0x08, 0x51, 0xf9, 0x06, 0x77, 0xfc, 0xc3, 0xd9, 0x8e, 0x63, 0x8e, 0xbe, 0x2b, 0xfc, 0x56, 0xd2, - 0x16, 0x8a, 0xb2, 0x0e, 0xe0, 0x63, 0xb0, 0x25, 0x1a, 0xcf, 0x1d, 0x8a, 0x2f, 0x48, 0xb3, 0x4f, - 0x0d, 0x66, 0x53, 0x79, 0x93, 0xe7, 0x6e, 0x79, 0x38, 0x50, 0xb6, 0x0e, 0x72, 0xec, 0x28, 0x97, - 0x05, 0x1f, 0x80, 0x8d, 0x0b, 0xd7, 0x6f, 0x59, 0xa6, 0x49, 0x9c, 0x48, 0x69, 0x8b, 0x2b, 0x6d, - 0x85, 0xf1, 0x3f, 0x4e, 0xd9, 0x50, 0x06, 0x0d, 0x29, 0xd8, 0x16, 0xca, 0x0d, 0xdf, 0x35, 0x4e, - 0xdd, 0xc0, 0x61, 0x61, 0xb9, 0xa0, 0xf2, 0x76, 0x5c, 0x22, 0xb7, 0x0f, 0xf2, 0x00, 0x2f, 0x07, - 0xca, 0xcd, 0x9c, 0x72, 0x95, 0x00, 0xa1, 0x7c, 0x6d, 0x68, 0x83, 0x55, 0xf1, 0xc6, 0x77, 0x68, - 0x63, 0x4a, 0x65, 0x99, 0x6f, 0xf5, 0x7b, 0xd3, 0x13, 0x5b, 0x0c, 0x4f, 0xef, 0x77, 0x7e, 0xf9, - 0x9c, 0x04, 0xa0, 0x84, 0xba, 0xfa, 0x17, 0x09, 0xec, 0x16, 0x26, 0x46, 0x78, 0x2f, 0xf1, 0x70, - 0xa4, 0xa6, 0x1e, 0x8e, 0x60, 0x96, 0xf8, 0x06, 0xde, 0x8d, 0xbe, 0x92, 0x80, 0x5c, 0x54, 0x21, - 0xe0, 0xc7, 0x89, 0x0e, 0xbe, 0x97, 0xea, 0x60, 0x25, 0xc3, 0x7b, 0x03, 0xfd, 0xfb, 0x97, 0x04, - 0xde, 0x99, 0x32, 0x03, 0x71, 0x42, 0x22, 0xe6, 0x24, 0xea, 0x09, 0x0e, 0xb7, 0xb2, 0xc4, 0xd7, - 0xd1, 0x38, 0x21, 0xe5, 0x60, 0x50, 0x21, 0x1b, 0x9e, 0x83, 0x1b, 0x22, 0x1b, 0xa6, 0x6d, 0xfc, - 0xe4, 0xbe, 0xac, 0xbf, 0x33, 0x1c, 0x28, 0x37, 0xea, 0xf9, 0x10, 0x54, 0xc4, 0x55, 0xff, 0x26, - 0x81, 0x9d, 0xfc, 0x92, 0x0f, 0xef, 0x24, 0xc2, 0xad, 0xa4, 0xc2, 0x7d, 0x3d, 0xc5, 0x12, 0xc1, - 0xfe, 0x1d, 0x58, 0x17, 0x07, 0x83, 0xe4, 0x3b, 0x68, 0x22, 0xe8, 0xe1, 0x16, 0x09, 0xcf, 0xf4, - 0x42, 0x22, 0x5a, 0xbe, 0xfc, 0xc9, 0x21, 0xd9, 0x86, 0x52, 0x6a, 0xea, 0xdf, 0x25, 0xf0, 0xde, - 0xcc, 0x62, 0x0b, 0xf5, 0x44, 0xd7, 0xb5, 0x54, 0xd7, 0xab, 0xc5, 0x02, 0x6f, 0xe6, 0x39, 0x54, - 0xff, 0xc5, 0xf3, 0x17, 0xd5, 0xb9, 0x6f, 0x5e, 0x54, 0xe7, 0xbe, 0x7d, 0x51, 0x9d, 0xfb, 0x72, - 0x58, 0x95, 0x9e, 0x0f, 0xab, 0xd2, 0x37, 0xc3, 0xaa, 0xf4, 0xed, 0xb0, 0x2a, 0xfd, 0x77, 0x58, - 0x95, 0xfe, 0xfc, 0xbf, 0xea, 0xdc, 0x6f, 0x76, 0x0b, 0xff, 0x06, 0xf9, 0x7f, 0x00, 0x00, 0x00, - 0xff, 0xff, 0xb4, 0x84, 0x53, 0xfb, 0x3b, 0x19, 0x00, 0x00, + // 857 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x96, 0xcd, 0x6e, 0xdb, 0x46, + 0x10, 0xc7, 0x45, 0xcb, 0x72, 0x9d, 0xad, 0x24, 0xb8, 0xdb, 0x2f, 0x5b, 0x07, 0x2a, 0xd0, 0x29, + 0x28, 0xd0, 0x65, 0x9d, 0x04, 0x85, 0xd1, 0x43, 0x9b, 0x30, 0x32, 0xd2, 0x14, 0x31, 0x6c, 0xac, + 0x9c, 0x4b, 0x91, 0x02, 0x5d, 0x91, 0x13, 0x69, 0x2b, 0x92, 0x4b, 0x70, 0x97, 0x6c, 0x74, 0xcb, + 0xa1, 0x0f, 0xd0, 0xf7, 0xe8, 0x83, 0xd4, 0x87, 0x1e, 0xd2, 0x5b, 0xd0, 0x83, 0x50, 0xb3, 0x6f, + 0xd1, 0x53, 0xc1, 0x25, 0xf5, 0x41, 0x7d, 0x34, 0x4a, 0x0e, 0xb9, 0x71, 0x67, 0xe6, 0xff, 0x1b, + 0xce, 0xc7, 0x52, 0x42, 0xf6, 0xe8, 0x44, 0x12, 0x2e, 0xac, 0x51, 0xdc, 0x87, 0x28, 0x00, 0x05, + 0xd2, 0x4a, 0x20, 0x70, 0x45, 0x64, 0x15, 0x0e, 0x16, 0x72, 0x2b, 0x14, 0x1e, 0x77, 0xc6, 0x56, + 0x72, 0xdc, 0x07, 0xc5, 0x8e, 0xad, 0x01, 0x04, 0x10, 0x31, 0x05, 0x2e, 0x09, 0x23, 0xa1, 0x04, + 0x3e, 0xca, 0x43, 0x09, 0x0b, 0x39, 0xc9, 0x43, 0x49, 0x11, 0xda, 0xfa, 0x7c, 0xc0, 0xd5, 0x30, + 0xee, 0x13, 0x47, 0xf8, 0xd6, 0x40, 0x0c, 0x84, 0xa5, 0x15, 0xfd, 0xf8, 0x99, 0x3e, 0xe9, 0x83, + 0x7e, 0xca, 0x49, 0xad, 0xbb, 0xf3, 0xa4, 0x3e, 0x73, 0x86, 0x3c, 0x80, 0x68, 0x6c, 0x85, 0xa3, + 0x41, 0x66, 0x90, 0x96, 0x0f, 0x8a, 0x59, 0xc9, 0x4a, 0xfe, 0x96, 0xb5, 0x49, 0x15, 0xc5, 0x81, + 0xe2, 0x3e, 0xac, 0x08, 0xbe, 0x7c, 0x9d, 0x40, 0x3a, 0x43, 0xf0, 0xd9, 0x8a, 0xee, 0xce, 0x26, + 
0x5d, 0xac, 0xb8, 0x67, 0xf1, 0x40, 0x49, 0x15, 0x2d, 0x8b, 0x3a, 0x7f, 0x19, 0x68, 0xff, 0x34, + 0xe1, 0x8e, 0xe2, 0x22, 0xc0, 0x3f, 0xa2, 0xfd, 0xac, 0x0a, 0x97, 0x29, 0x76, 0x68, 0xdc, 0x34, + 0x6e, 0xbd, 0x7f, 0xfb, 0x0b, 0x32, 0xef, 0xde, 0x0c, 0x4a, 0xc2, 0xd1, 0x20, 0x33, 0x48, 0x92, + 0x45, 0x93, 0xe4, 0x98, 0x9c, 0xf7, 0x7f, 0x02, 0x47, 0x9d, 0x81, 0x62, 0x36, 0xbe, 0x9a, 0xb4, + 0x2b, 0xe9, 0xa4, 0x8d, 0xe6, 0x36, 0x3a, 0xa3, 0x62, 0x0f, 0x35, 0x5c, 0xf0, 0x40, 0xc1, 0x79, + 0x98, 0x65, 0x94, 0x87, 0x3b, 0x3a, 0xcd, 0x9d, 0xed, 0xd2, 0x74, 0x17, 0xa5, 0xf6, 0x07, 0xe9, + 0xa4, 0xdd, 0x28, 0x99, 0x68, 0x19, 0xde, 0xf9, 0x6d, 0x07, 0x7d, 0x78, 0x21, 0xdc, 0x2e, 0x97, + 0x51, 0xac, 0x4d, 0x76, 0xec, 0x0e, 0x40, 0xbd, 0x83, 0x3a, 0x2f, 0xd1, 0xae, 0x0c, 0xc1, 0x29, + 0xca, 0xbb, 0x4d, 0x36, 0xee, 0x20, 0x59, 0xf3, 0x7e, 0xbd, 0x10, 0x1c, 0xbb, 0x5e, 0xf0, 0x77, + 0xb3, 0x13, 0xd5, 0x34, 0xfc, 0x14, 0xed, 0x49, 0xc5, 0x54, 0x2c, 0x0f, 0xab, 0x9a, 0x7b, 0xf7, + 0x0d, 0xb9, 0x5a, 0x6b, 0x37, 0x0b, 0xf2, 0x5e, 0x7e, 0xa6, 0x05, 0xb3, 0xf3, 0x87, 0x81, 0x3e, + 0x5d, 0xa3, 0x7a, 0xcc, 0xa5, 0xc2, 0x4f, 0x57, 0x3a, 0x46, 0xb6, 0xeb, 0x58, 0xa6, 0xd6, 0xfd, + 0x3a, 0x28, 0xb2, 0xee, 0x4f, 0x2d, 0x0b, 0xdd, 0xea, 0xa1, 0x1a, 0x57, 0xe0, 0x67, 0xdb, 0x50, + 0x5d, 0x42, 0x6f, 0x51, 0x96, 0xdd, 0x28, 0xd0, 0xb5, 0x47, 0x19, 0x84, 0xe6, 0xac, 0xce, 0x9f, + 0xd5, 0xb5, 0xe5, 0x64, 0xed, 0xc4, 0xcf, 0x50, 0xdd, 0xe7, 0xc1, 0xfd, 0x84, 0x71, 0x8f, 0xf5, + 0x3d, 0x78, 0xed, 0x12, 0x64, 0x37, 0x88, 0xe4, 0x37, 0x88, 0x3c, 0x0a, 0xd4, 0x79, 0xd4, 0x53, + 0x11, 0x0f, 0x06, 0xf6, 0x41, 0x3a, 0x69, 0xd7, 0xcf, 0x16, 0x48, 0xb4, 0xc4, 0xc5, 0x3f, 0xa0, + 0x7d, 0x09, 0x1e, 0x38, 0x4a, 0x44, 0x6f, 0xb6, 0xe9, 0x8f, 0x59, 0x1f, 0xbc, 0x5e, 0x21, 0xb5, + 0xeb, 0x59, 0xdf, 0xa6, 0x27, 0x3a, 0x43, 0x62, 0x0f, 0x35, 0x7d, 0xf6, 0xfc, 0x49, 0xc0, 0x66, + 0x85, 0x54, 0xdf, 0xb2, 0x10, 0x9c, 0x4e, 0xda, 0xcd, 0xb3, 0x12, 0x8b, 0x2e, 0xb1, 0xf1, 0x0b, + 0x03, 0xb5, 0xe2, 0x60, 0x08, 0xcc, 0x53, 0xc3, 0xf1, 0x85, 0x70, 0xa7, 0x9f, 0x8d, 0x0b, 0x3d, + 0xa1, 0xc3, 0xdd, 0x9b, 0xc6, 0xad, 0x1b, 0xf6, 0xbd, 0x74, 0xd2, 0x6e, 0x3d, 0xd9, 0x18, 0xf5, + 0xef, 0xa4, 0x6d, 0x6e, 0xf6, 0x5e, 0x8e, 0x43, 0xa0, 0xff, 0x93, 0xa3, 0xf3, 0x7b, 0x0d, 0x1d, + 0x6d, 0x5c, 0x6c, 0xfc, 0x1d, 0xc2, 0xa2, 0x2f, 0x21, 0x4a, 0xc0, 0x7d, 0x98, 0x7f, 0xe6, 0xb8, + 0x08, 0xf4, 0x6c, 0xab, 0x76, 0xab, 0xd8, 0x11, 0x7c, 0xbe, 0x12, 0x41, 0xd7, 0xa8, 0xf0, 0x2f, + 0x06, 0x6a, 0xb8, 0x79, 0x1a, 0x70, 0x2f, 0x84, 0x3b, 0xdd, 0xcd, 0x87, 0x6f, 0x73, 0xe5, 0x48, + 0x77, 0x91, 0x74, 0x1a, 0xa8, 0x68, 0x6c, 0x7f, 0x5c, 0xbc, 0x50, 0xa3, 0xe4, 0xa3, 0xe5, 0xa4, + 0x59, 0x49, 0xee, 0x0c, 0x29, 0xef, 0x7b, 0x9e, 0xf8, 0x19, 0x5c, 0x3d, 0xe5, 0xda, 0xbc, 0xa4, + 0xee, 0x4a, 0x04, 0x5d, 0xa3, 0xc2, 0x5f, 0xa3, 0xa6, 0x13, 0x47, 0x11, 0x04, 0xea, 0xdb, 0xbc, + 0xbf, 0x7a, 0x64, 0x35, 0xfb, 0x93, 0x82, 0xd3, 0x7c, 0x50, 0xf2, 0xd2, 0xa5, 0xe8, 0x4c, 0xef, + 0x82, 0xe4, 0x11, 0xb8, 0x53, 0x7d, 0xad, 0xac, 0xef, 0x96, 0xbc, 0x74, 0x29, 0x1a, 0x9f, 0xa0, + 0x3a, 0x3c, 0x0f, 0xc1, 0x99, 0x36, 0x74, 0x4f, 0xab, 0x3f, 0x2a, 0xd4, 0xf5, 0xd3, 0x05, 0x1f, + 0x2d, 0x45, 0x62, 0x07, 0x21, 0x47, 0x04, 0x2e, 0xcf, 0x7f, 0x32, 0xde, 0xd3, 0x83, 0xb0, 0xb6, + 0xbb, 0x48, 0x0f, 0xa6, 0xba, 0xf9, 0x07, 0x7b, 0x66, 0x92, 0x74, 0x01, 0xdb, 0xf2, 0x10, 0x5e, + 0x1d, 0x13, 0x3e, 0x40, 0xd5, 0x11, 0x8c, 0xf5, 0x12, 0xdd, 0xa0, 0xd9, 0x23, 0xbe, 0x87, 0x6a, + 0x09, 0xf3, 0x62, 0x28, 0x2e, 0xf4, 0x67, 0xdb, 0xbd, 0xc7, 0x25, 0xf7, 0x81, 0xe6, 0xc2, 0xaf, + 0x76, 0x4e, 0x0c, 0xfb, 
0x9b, 0xab, 0x6b, 0xb3, 0xf2, 0xf2, 0xda, 0xac, 0xbc, 0xba, 0x36, 0x2b, + 0x2f, 0x52, 0xd3, 0xb8, 0x4a, 0x4d, 0xe3, 0x65, 0x6a, 0x1a, 0xaf, 0x52, 0xd3, 0xf8, 0x3b, 0x35, + 0x8d, 0x5f, 0xff, 0x31, 0x2b, 0xdf, 0x1f, 0x6d, 0xfc, 0x9b, 0xf3, 0x5f, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x96, 0x9a, 0x3a, 0xb5, 0x1b, 0x09, 0x00, 0x00, } -func (m *AllowedCSIDriver) Marshal() (dAtA []byte, err error) { +func (m *Eviction) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -744,104 +268,12 @@ func (m *AllowedCSIDriver) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *AllowedCSIDriver) MarshalTo(dAtA []byte) (int, error) { +func (m *Eviction) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *AllowedCSIDriver) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AllowedFlexVolume) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AllowedFlexVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Driver) - copy(dAtA[i:], m.Driver) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *AllowedHostPath) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AllowedHostPath) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AllowedHostPath) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i-- - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - i -= len(m.PathPrefix) - copy(dAtA[i:], m.PathPrefix) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PathPrefix))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Eviction) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Eviction) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Eviction) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Eviction) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -871,106 +303,6 @@ func (m *Eviction) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *FSGroupStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *FSGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *FSGroupStrategyOptions) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *HostPortRange) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HostPortRange) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HostPortRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) - i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - -func (m *IDRange) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IDRange) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IDRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) - i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - func (m *PodDisruptionBudget) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1218,3446 +550,247 @@ func (m *PodDisruptionBudgetStatus) MarshalToSizedBuffer(dAtA []byte) (int, erro return len(dAtA) - i, nil } -func (m *PodSecurityPolicy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ } - return dAtA[:n], nil -} - -func (m *PodSecurityPolicy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + dAtA[offset] = uint8(v) + return base } - -func (m *PodSecurityPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *Eviction) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.DeleteOptions != nil { + l = m.DeleteOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return n } -func (m *PodSecurityPolicyList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - 
n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *PodDisruptionBudget) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *PodSecurityPolicyList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *PodSecurityPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *PodDisruptionBudgetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return n } -func (m *PodSecurityPolicySpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *PodDisruptionBudgetSpec) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodSecurityPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l - if m.RuntimeClass != nil { - { - size, err := m.RuntimeClass.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xc2 - } - if len(m.AllowedCSIDrivers) > 0 { - for iNdEx := len(m.AllowedCSIDrivers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.AllowedCSIDrivers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xba - } - } - if m.RunAsGroup != nil { - { - size, err := m.RunAsGroup.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xb2 - } - if len(m.AllowedProcMountTypes) > 0 { - for iNdEx := len(m.AllowedProcMountTypes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllowedProcMountTypes[iNdEx]) - copy(dAtA[i:], m.AllowedProcMountTypes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedProcMountTypes[iNdEx]))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xaa - } - } - if len(m.ForbiddenSysctls) > 0 { - for iNdEx := len(m.ForbiddenSysctls) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ForbiddenSysctls[iNdEx]) - copy(dAtA[i:], m.ForbiddenSysctls[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ForbiddenSysctls[iNdEx]))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xa2 - } - } - if len(m.AllowedUnsafeSysctls) > 0 { - for iNdEx := len(m.AllowedUnsafeSysctls) - 1; iNdEx >= 0; 
iNdEx-- { - i -= len(m.AllowedUnsafeSysctls[iNdEx]) - copy(dAtA[i:], m.AllowedUnsafeSysctls[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedUnsafeSysctls[iNdEx]))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x9a - } - } - if len(m.AllowedFlexVolumes) > 0 { - for iNdEx := len(m.AllowedFlexVolumes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.AllowedFlexVolumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x92 - } - } - if len(m.AllowedHostPaths) > 0 { - for iNdEx := len(m.AllowedHostPaths) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.AllowedHostPaths[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x8a - } + if m.MinAvailable != nil { + l = m.MinAvailable.Size() + n += 1 + l + sovGenerated(uint64(l)) } - if m.AllowPrivilegeEscalation != nil { - i-- - if *m.AllowPrivilegeEscalation { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x80 + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) } - if m.DefaultAllowPrivilegeEscalation != nil { - i-- - if *m.DefaultAllowPrivilegeEscalation { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x78 + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i-- - if m.ReadOnlyRootFilesystem { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.UnhealthyPodEvictionPolicy != nil { + l = len(*m.UnhealthyPodEvictionPolicy) + n += 1 + l + sovGenerated(uint64(l)) } - i-- - dAtA[i] = 0x70 - { - size, err := m.FSGroup.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + return n +} + +func (m *PodDisruptionBudgetStatus) Size() (n int) { + if m == nil { + return 0 } - i-- - dAtA[i] = 0x6a - { - size, err := m.SupplementalGroups.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + if len(m.DisruptedPods) > 0 { + for k, v := range m.DisruptedPods { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0x62 - { - size, err := m.RunAsUser.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + n += 1 + sovGenerated(uint64(m.DisruptionsAllowed)) + n += 1 + sovGenerated(uint64(m.CurrentHealthy)) + n += 1 + sovGenerated(uint64(m.DesiredHealthy)) + n += 1 + sovGenerated(uint64(m.ExpectedPods)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0x5a - { - size, err := m.SELinux.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Eviction) String() string { + if this == nil { + return "nil" } - i-- - dAtA[i] = 0x52 - i-- - if 
m.HostIPC { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + s := strings.Join([]string{`&Eviction{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `DeleteOptions:` + strings.Replace(fmt.Sprintf("%v", this.DeleteOptions), "DeleteOptions", "v1.DeleteOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodDisruptionBudget) String() string { + if this == nil { + return "nil" } - i-- - dAtA[i] = 0x48 - i-- - if m.HostPID { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + s := strings.Join([]string{`&PodDisruptionBudget{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodDisruptionBudgetSpec", "PodDisruptionBudgetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodDisruptionBudgetStatus", "PodDisruptionBudgetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodDisruptionBudgetList) String() string { + if this == nil { + return "nil" } - i-- - dAtA[i] = 0x40 - if len(m.HostPorts) > 0 { - for iNdEx := len(m.HostPorts) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.HostPorts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } + repeatedStringForItems := "[]PodDisruptionBudget{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodDisruptionBudget", "PodDisruptionBudget", 1), `&`, ``, 1) + "," } - i-- - if m.HostNetwork { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + repeatedStringForItems += "}" + s := strings.Join([]string{`&PodDisruptionBudgetList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PodDisruptionBudgetSpec) String() string { + if this == nil { + return "nil" } - i-- - dAtA[i] = 0x30 - if len(m.Volumes) > 0 { - for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Volumes[iNdEx]) - copy(dAtA[i:], m.Volumes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Volumes[iNdEx]))) - i-- - dAtA[i] = 0x2a - } + s := strings.Join([]string{`&PodDisruptionBudgetSpec{`, + `MinAvailable:` + strings.Replace(fmt.Sprintf("%v", this.MinAvailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `UnhealthyPodEvictionPolicy:` + valueToStringGenerated(this.UnhealthyPodEvictionPolicy) + `,`, + `}`, + }, "") + return s +} +func (this *PodDisruptionBudgetStatus) String() string { + if this == nil { + return "nil" } - if len(m.AllowedCapabilities) > 0 { - for iNdEx := len(m.AllowedCapabilities) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllowedCapabilities[iNdEx]) - copy(dAtA[i:], m.AllowedCapabilities[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedCapabilities[iNdEx]))) - i-- - dAtA[i] = 0x22 - } + repeatedStringForConditions := "[]Condition{" + for _, f := range this.Conditions { + repeatedStringForConditions += fmt.Sprintf("%v", f) + "," } - if len(m.RequiredDropCapabilities) > 0 { 
- for iNdEx := len(m.RequiredDropCapabilities) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.RequiredDropCapabilities[iNdEx]) - copy(dAtA[i:], m.RequiredDropCapabilities[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequiredDropCapabilities[iNdEx]))) - i-- - dAtA[i] = 0x1a - } + repeatedStringForConditions += "}" + keysForDisruptedPods := make([]string, 0, len(this.DisruptedPods)) + for k := range this.DisruptedPods { + keysForDisruptedPods = append(keysForDisruptedPods, k) } - if len(m.DefaultAddCapabilities) > 0 { - for iNdEx := len(m.DefaultAddCapabilities) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.DefaultAddCapabilities[iNdEx]) - copy(dAtA[i:], m.DefaultAddCapabilities[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DefaultAddCapabilities[iNdEx]))) - i-- - dAtA[i] = 0x12 - } + github_com_gogo_protobuf_sortkeys.Strings(keysForDisruptedPods) + mapStringForDisruptedPods := "map[string]v1.Time{" + for _, k := range keysForDisruptedPods { + mapStringForDisruptedPods += fmt.Sprintf("%v: %v,", k, this.DisruptedPods[k]) } - i-- - if m.Privileged { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + mapStringForDisruptedPods += "}" + s := strings.Join([]string{`&PodDisruptionBudgetStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `DisruptedPods:` + mapStringForDisruptedPods + `,`, + `DisruptionsAllowed:` + fmt.Sprintf("%v", this.DisruptionsAllowed) + `,`, + `CurrentHealthy:` + fmt.Sprintf("%v", this.CurrentHealthy) + `,`, + `DesiredHealthy:` + fmt.Sprintf("%v", this.DesiredHealthy) + `,`, + `ExpectedPods:` + fmt.Sprintf("%v", this.ExpectedPods) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" } - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) } - -func (m *RunAsGroupStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RunAsGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RunAsGroupStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *RunAsUserStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RunAsUserStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RunAsUserStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if 
err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *RuntimeClassStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RuntimeClassStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RuntimeClassStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.DefaultRuntimeClassName != nil { - i -= len(*m.DefaultRuntimeClassName) - copy(dAtA[i:], *m.DefaultRuntimeClassName) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.DefaultRuntimeClassName))) - i-- - dAtA[i] = 0x12 - } - if len(m.AllowedRuntimeClassNames) > 0 { - for iNdEx := len(m.AllowedRuntimeClassNames) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllowedRuntimeClassNames[iNdEx]) - copy(dAtA[i:], m.AllowedRuntimeClassNames[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedRuntimeClassNames[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *SELinuxStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SELinuxStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SELinuxStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.SELinuxOptions != nil { - { - size, err := m.SELinuxOptions.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *SupplementalGroupsStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SupplementalGroupsStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SupplementalGroupsStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *AllowedCSIDriver) Size() (n int) { - if m 
== nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *AllowedFlexVolume) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Driver) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *AllowedHostPath) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.PathPrefix) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n -} - -func (m *Eviction) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.DeleteOptions != nil { - l = m.DeleteOptions.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *FSGroupStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *HostPortRange) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Min)) - n += 1 + sovGenerated(uint64(m.Max)) - return n -} - -func (m *IDRange) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Min)) - n += 1 + sovGenerated(uint64(m.Max)) - return n -} - -func (m *PodDisruptionBudget) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PodDisruptionBudgetList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PodDisruptionBudgetSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.MinAvailable != nil { - l = m.MinAvailable.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.MaxUnavailable != nil { - l = m.MaxUnavailable.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.UnhealthyPodEvictionPolicy != nil { - l = len(*m.UnhealthyPodEvictionPolicy) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *PodDisruptionBudgetStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) - if len(m.DisruptedPods) > 0 { - for k, v := range m.DisruptedPods { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - n += 1 + sovGenerated(uint64(m.DisruptionsAllowed)) - n += 1 + sovGenerated(uint64(m.CurrentHealthy)) - n += 1 + sovGenerated(uint64(m.DesiredHealthy)) - n += 1 + sovGenerated(uint64(m.ExpectedPods)) - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PodSecurityPolicy) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func 
(m *PodSecurityPolicyList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PodSecurityPolicySpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 2 - if len(m.DefaultAddCapabilities) > 0 { - for _, s := range m.DefaultAddCapabilities { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.RequiredDropCapabilities) > 0 { - for _, s := range m.RequiredDropCapabilities { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.AllowedCapabilities) > 0 { - for _, s := range m.AllowedCapabilities { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Volumes) > 0 { - for _, s := range m.Volumes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - n += 2 - if len(m.HostPorts) > 0 { - for _, e := range m.HostPorts { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - n += 2 - n += 2 - l = m.SELinux.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.RunAsUser.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.SupplementalGroups.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.FSGroup.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.DefaultAllowPrivilegeEscalation != nil { - n += 2 - } - if m.AllowPrivilegeEscalation != nil { - n += 3 - } - if len(m.AllowedHostPaths) > 0 { - for _, e := range m.AllowedHostPaths { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - } - if len(m.AllowedFlexVolumes) > 0 { - for _, e := range m.AllowedFlexVolumes { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - } - if len(m.AllowedUnsafeSysctls) > 0 { - for _, s := range m.AllowedUnsafeSysctls { - l = len(s) - n += 2 + l + sovGenerated(uint64(l)) - } - } - if len(m.ForbiddenSysctls) > 0 { - for _, s := range m.ForbiddenSysctls { - l = len(s) - n += 2 + l + sovGenerated(uint64(l)) - } - } - if len(m.AllowedProcMountTypes) > 0 { - for _, s := range m.AllowedProcMountTypes { - l = len(s) - n += 2 + l + sovGenerated(uint64(l)) - } - } - if m.RunAsGroup != nil { - l = m.RunAsGroup.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if len(m.AllowedCSIDrivers) > 0 { - for _, e := range m.AllowedCSIDrivers { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - } - if m.RuntimeClass != nil { - l = m.RuntimeClass.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *RunAsGroupStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *RunAsUserStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *RuntimeClassStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.AllowedRuntimeClassNames) > 0 { - for _, s := range m.AllowedRuntimeClassNames { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.DefaultRuntimeClassName != nil { - l = len(*m.DefaultRuntimeClassName) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m 
*SELinuxStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if m.SELinuxOptions != nil { - l = m.SELinuxOptions.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *SupplementalGroupsStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *AllowedCSIDriver) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AllowedCSIDriver{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s -} -func (this *AllowedFlexVolume) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AllowedFlexVolume{`, - `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, - `}`, - }, "") - return s -} -func (this *AllowedHostPath) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AllowedHostPath{`, - `PathPrefix:` + fmt.Sprintf("%v", this.PathPrefix) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *Eviction) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Eviction{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `DeleteOptions:` + strings.Replace(fmt.Sprintf("%v", this.DeleteOptions), "DeleteOptions", "v1.DeleteOptions", 1) + `,`, - `}`, - }, "") - return s -} -func (this *FSGroupStrategyOptions) String() string { - if this == nil { - return "nil" - } - repeatedStringForRanges := "[]IDRange{" - for _, f := range this.Ranges { - repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," - } - repeatedStringForRanges += "}" - s := strings.Join([]string{`&FSGroupStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + repeatedStringForRanges + `,`, - `}`, - }, "") - return s -} -func (this *HostPortRange) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HostPortRange{`, - `Min:` + fmt.Sprintf("%v", this.Min) + `,`, - `Max:` + fmt.Sprintf("%v", this.Max) + `,`, - `}`, - }, "") - return s -} -func (this *IDRange) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IDRange{`, - `Min:` + fmt.Sprintf("%v", this.Min) + `,`, - `Max:` + fmt.Sprintf("%v", this.Max) + `,`, - `}`, - }, "") - return s -} -func (this *PodDisruptionBudget) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodDisruptionBudget{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodDisruptionBudgetSpec", "PodDisruptionBudgetSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodDisruptionBudgetStatus", "PodDisruptionBudgetStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodDisruptionBudgetList) String() 
string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]PodDisruptionBudget{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodDisruptionBudget", "PodDisruptionBudget", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&PodDisruptionBudgetList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *PodDisruptionBudgetSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodDisruptionBudgetSpec{`, - `MinAvailable:` + strings.Replace(fmt.Sprintf("%v", this.MinAvailable), "IntOrString", "intstr.IntOrString", 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, - `UnhealthyPodEvictionPolicy:` + valueToStringGenerated(this.UnhealthyPodEvictionPolicy) + `,`, - `}`, - }, "") - return s -} -func (this *PodDisruptionBudgetStatus) String() string { - if this == nil { - return "nil" - } - repeatedStringForConditions := "[]Condition{" - for _, f := range this.Conditions { - repeatedStringForConditions += fmt.Sprintf("%v", f) + "," - } - repeatedStringForConditions += "}" - keysForDisruptedPods := make([]string, 0, len(this.DisruptedPods)) - for k := range this.DisruptedPods { - keysForDisruptedPods = append(keysForDisruptedPods, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForDisruptedPods) - mapStringForDisruptedPods := "map[string]v1.Time{" - for _, k := range keysForDisruptedPods { - mapStringForDisruptedPods += fmt.Sprintf("%v: %v,", k, this.DisruptedPods[k]) - } - mapStringForDisruptedPods += "}" - s := strings.Join([]string{`&PodDisruptionBudgetStatus{`, - `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, - `DisruptedPods:` + mapStringForDisruptedPods + `,`, - `DisruptionsAllowed:` + fmt.Sprintf("%v", this.DisruptionsAllowed) + `,`, - `CurrentHealthy:` + fmt.Sprintf("%v", this.CurrentHealthy) + `,`, - `DesiredHealthy:` + fmt.Sprintf("%v", this.DesiredHealthy) + `,`, - `ExpectedPods:` + fmt.Sprintf("%v", this.ExpectedPods) + `,`, - `Conditions:` + repeatedStringForConditions + `,`, - `}`, - }, "") - return s -} -func (this *PodSecurityPolicy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodSecurityPolicy{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSecurityPolicySpec", "PodSecurityPolicySpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodSecurityPolicyList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]PodSecurityPolicy{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodSecurityPolicy", "PodSecurityPolicy", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&PodSecurityPolicyList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func 
(this *PodSecurityPolicySpec) String() string { - if this == nil { - return "nil" - } - repeatedStringForHostPorts := "[]HostPortRange{" - for _, f := range this.HostPorts { - repeatedStringForHostPorts += strings.Replace(strings.Replace(f.String(), "HostPortRange", "HostPortRange", 1), `&`, ``, 1) + "," - } - repeatedStringForHostPorts += "}" - repeatedStringForAllowedHostPaths := "[]AllowedHostPath{" - for _, f := range this.AllowedHostPaths { - repeatedStringForAllowedHostPaths += strings.Replace(strings.Replace(f.String(), "AllowedHostPath", "AllowedHostPath", 1), `&`, ``, 1) + "," - } - repeatedStringForAllowedHostPaths += "}" - repeatedStringForAllowedFlexVolumes := "[]AllowedFlexVolume{" - for _, f := range this.AllowedFlexVolumes { - repeatedStringForAllowedFlexVolumes += strings.Replace(strings.Replace(f.String(), "AllowedFlexVolume", "AllowedFlexVolume", 1), `&`, ``, 1) + "," - } - repeatedStringForAllowedFlexVolumes += "}" - repeatedStringForAllowedCSIDrivers := "[]AllowedCSIDriver{" - for _, f := range this.AllowedCSIDrivers { - repeatedStringForAllowedCSIDrivers += strings.Replace(strings.Replace(f.String(), "AllowedCSIDriver", "AllowedCSIDriver", 1), `&`, ``, 1) + "," - } - repeatedStringForAllowedCSIDrivers += "}" - s := strings.Join([]string{`&PodSecurityPolicySpec{`, - `Privileged:` + fmt.Sprintf("%v", this.Privileged) + `,`, - `DefaultAddCapabilities:` + fmt.Sprintf("%v", this.DefaultAddCapabilities) + `,`, - `RequiredDropCapabilities:` + fmt.Sprintf("%v", this.RequiredDropCapabilities) + `,`, - `AllowedCapabilities:` + fmt.Sprintf("%v", this.AllowedCapabilities) + `,`, - `Volumes:` + fmt.Sprintf("%v", this.Volumes) + `,`, - `HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`, - `HostPorts:` + repeatedStringForHostPorts + `,`, - `HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`, - `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, - `SELinux:` + strings.Replace(strings.Replace(this.SELinux.String(), "SELinuxStrategyOptions", "SELinuxStrategyOptions", 1), `&`, ``, 1) + `,`, - `RunAsUser:` + strings.Replace(strings.Replace(this.RunAsUser.String(), "RunAsUserStrategyOptions", "RunAsUserStrategyOptions", 1), `&`, ``, 1) + `,`, - `SupplementalGroups:` + strings.Replace(strings.Replace(this.SupplementalGroups.String(), "SupplementalGroupsStrategyOptions", "SupplementalGroupsStrategyOptions", 1), `&`, ``, 1) + `,`, - `FSGroup:` + strings.Replace(strings.Replace(this.FSGroup.String(), "FSGroupStrategyOptions", "FSGroupStrategyOptions", 1), `&`, ``, 1) + `,`, - `ReadOnlyRootFilesystem:` + fmt.Sprintf("%v", this.ReadOnlyRootFilesystem) + `,`, - `DefaultAllowPrivilegeEscalation:` + valueToStringGenerated(this.DefaultAllowPrivilegeEscalation) + `,`, - `AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`, - `AllowedHostPaths:` + repeatedStringForAllowedHostPaths + `,`, - `AllowedFlexVolumes:` + repeatedStringForAllowedFlexVolumes + `,`, - `AllowedUnsafeSysctls:` + fmt.Sprintf("%v", this.AllowedUnsafeSysctls) + `,`, - `ForbiddenSysctls:` + fmt.Sprintf("%v", this.ForbiddenSysctls) + `,`, - `AllowedProcMountTypes:` + fmt.Sprintf("%v", this.AllowedProcMountTypes) + `,`, - `RunAsGroup:` + strings.Replace(this.RunAsGroup.String(), "RunAsGroupStrategyOptions", "RunAsGroupStrategyOptions", 1) + `,`, - `AllowedCSIDrivers:` + repeatedStringForAllowedCSIDrivers + `,`, - `RuntimeClass:` + strings.Replace(this.RuntimeClass.String(), "RuntimeClassStrategyOptions", "RuntimeClassStrategyOptions", 1) + `,`, - `}`, - }, "") - return s -} -func (this 
*RunAsGroupStrategyOptions) String() string { - if this == nil { - return "nil" - } - repeatedStringForRanges := "[]IDRange{" - for _, f := range this.Ranges { - repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," - } - repeatedStringForRanges += "}" - s := strings.Join([]string{`&RunAsGroupStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + repeatedStringForRanges + `,`, - `}`, - }, "") - return s -} -func (this *RunAsUserStrategyOptions) String() string { - if this == nil { - return "nil" - } - repeatedStringForRanges := "[]IDRange{" - for _, f := range this.Ranges { - repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," - } - repeatedStringForRanges += "}" - s := strings.Join([]string{`&RunAsUserStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + repeatedStringForRanges + `,`, - `}`, - }, "") - return s -} -func (this *RuntimeClassStrategyOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RuntimeClassStrategyOptions{`, - `AllowedRuntimeClassNames:` + fmt.Sprintf("%v", this.AllowedRuntimeClassNames) + `,`, - `DefaultRuntimeClassName:` + valueToStringGenerated(this.DefaultRuntimeClassName) + `,`, - `}`, - }, "") - return s -} -func (this *SELinuxStrategyOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SELinuxStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "v11.SELinuxOptions", 1) + `,`, - `}`, - }, "") - return s -} -func (this *SupplementalGroupsStrategyOptions) String() string { - if this == nil { - return "nil" - } - repeatedStringForRanges := "[]IDRange{" - for _, f := range this.Ranges { - repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," - } - repeatedStringForRanges += "}" - s := strings.Join([]string{`&SupplementalGroupsStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + repeatedStringForRanges + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AllowedCSIDriver: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AllowedCSIDriver: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - 
if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AllowedFlexVolume: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AllowedFlexVolume: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Driver = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AllowedHostPath: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AllowedHostPath: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PathPrefix", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen 
- if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PathPrefix = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Eviction) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Eviction: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Eviction: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeleteOptions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.DeleteOptions == nil { - m.DeleteOptions = &v1.DeleteOptions{} - } - if err := m.DeleteOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; 
shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FSGroupStrategyOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: FSGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rule = FSGroupStrategyType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HostPortRange) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HostPortRange: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HostPortRange: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) - } - m.Min = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Min |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) - } - m.Max = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Max |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IDRange) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IDRange: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IDRange: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) - } - m.Min = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Min |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) - } - m.Max = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Max |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodDisruptionBudget: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodDisruptionBudget: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := 
m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodDisruptionBudgetList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodDisruptionBudgetList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if 
msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, PodDisruptionBudget{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodDisruptionBudgetSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodDisruptionBudgetSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MinAvailable", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.MinAvailable == nil { - m.MinAvailable = &intstr.IntOrString{} - } - if err := m.MinAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Selector == nil { - m.Selector = &v1.LabelSelector{} - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.MaxUnavailable == nil { - 
m.MaxUnavailable = &intstr.IntOrString{} - } - if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UnhealthyPodEvictionPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := UnhealthyPodEvictionPolicyType(dAtA[iNdEx:postIndex]) - m.UnhealthyPodEvictionPolicy = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodDisruptionBudgetStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodDisruptionBudgetStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) - } - m.ObservedGeneration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ObservedGeneration |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DisruptedPods", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.DisruptedPods == nil { - m.DisruptedPods = make(map[string]v1.Time) - } - var mapkey string - mapvalue := &v1.Time{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &v1.Time{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.DisruptedPods[mapkey] = *mapvalue - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DisruptionsAllowed", wireType) - } - m.DisruptionsAllowed = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DisruptionsAllowed |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentHealthy", wireType) - } - m.CurrentHealthy = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CurrentHealthy |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DesiredHealthy", wireType) - } - m.DesiredHealthy = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DesiredHealthy |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExpectedPods", wireType) - } - m.ExpectedPods = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ExpectedPods |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return 
ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, v1.Condition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodSecurityPolicy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodSecurityPolicy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - 
if wireType == 4 { - return fmt.Errorf("proto: PodSecurityPolicyList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodSecurityPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, PodSecurityPolicy{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodSecurityPolicySpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodSecurityPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Privileged", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Privileged = bool(v != 0) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultAddCapabilities", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if 
intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DefaultAddCapabilities = append(m.DefaultAddCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequiredDropCapabilities", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RequiredDropCapabilities = append(m.RequiredDropCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedCapabilities", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AllowedCapabilities = append(m.AllowedCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Volumes = append(m.Volumes, FSType(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostNetwork", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.HostNetwork = bool(v != 0) - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HostPorts", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - m.HostPorts = append(m.HostPorts, HostPortRange{}) - if err := m.HostPorts[len(m.HostPorts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostPID", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.HostPID = bool(v != 0) - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostIPC", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.HostIPC = bool(v != 0) - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SELinux", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.SELinux.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.RunAsUser.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.SupplementalGroups.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSGroup", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - 
} - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.FSGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 14: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnlyRootFilesystem", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnlyRootFilesystem = bool(v != 0) - case 15: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultAllowPrivilegeEscalation", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.DefaultAllowPrivilegeEscalation = &b - case 16: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowPrivilegeEscalation", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } +func (m *Eviction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - b := bool(v != 0) - m.AllowPrivilegeEscalation = &b - case 17: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Eviction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Eviction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedHostPaths", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4684,14 +817,13 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedHostPaths = append(m.AllowedHostPaths, AllowedHostPath{}) - if err := m.AllowedHostPaths[len(m.AllowedHostPaths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 18: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedFlexVolumes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DeleteOptions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4718,110 +850,66 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedFlexVolumes = append(m.AllowedFlexVolumes, AllowedFlexVolume{}) - if err := m.AllowedFlexVolumes[len(m.AllowedFlexVolumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.DeleteOptions == nil { + m.DeleteOptions = &v1.DeleteOptions{} + } + if err := m.DeleteOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = 
postIndex - case 19: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedUnsafeSysctls", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.AllowedUnsafeSysctls = append(m.AllowedUnsafeSysctls, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 20: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ForbiddenSysctls", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.ForbiddenSysctls = append(m.ForbiddenSysctls, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 21: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedProcMountTypes", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - m.AllowedProcMountTypes = append(m.AllowedProcMountTypes, k8s_io_api_core_v1.ProcMountType(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 22: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodDisruptionBudget: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodDisruptionBudget: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RunAsGroup", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4848,16 +936,13 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { 
return io.ErrUnexpectedEOF } - if m.RunAsGroup == nil { - m.RunAsGroup = &RunAsGroupStrategyOptions{} - } - if err := m.RunAsGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 23: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedCSIDrivers", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4884,14 +969,13 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedCSIDrivers = append(m.AllowedCSIDrivers, AllowedCSIDriver{}) - if err := m.AllowedCSIDrivers[len(m.AllowedCSIDrivers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 24: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RuntimeClass", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4918,10 +1002,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.RuntimeClass == nil { - m.RuntimeClass = &RuntimeClassStrategyOptions{} - } - if err := m.RuntimeClass.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -4946,7 +1027,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { +func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4969,17 +1050,17 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RunAsGroupStrategyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: PodDisruptionBudgetList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RunAsGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodDisruptionBudgetList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4989,27 +1070,28 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Rule = RunAsGroupStrategy(dAtA[iNdEx:postIndex]) + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + return fmt.Errorf("proto: wrong 
wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5036,8 +1118,8 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, PodDisruptionBudget{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -5062,7 +1144,7 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { +func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5085,17 +1167,17 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RunAsUserStrategyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: PodDisruptionBudgetSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RunAsUserStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodDisruptionBudgetSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MinAvailable", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5105,27 +1187,31 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Rule = RunAsUserStrategy(dAtA[iNdEx:postIndex]) + if m.MinAvailable == nil { + m.MinAvailable = &intstr.IntOrString{} + } + if err := m.MinAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5152,66 +1238,18 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { 
- preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RuntimeClassStrategyOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RuntimeClassStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedRuntimeClassNames", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5221,27 +1259,31 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedRuntimeClassNames = append(m.AllowedRuntimeClassNames, string(dAtA[iNdEx:postIndex])) + if m.MaxUnavailable == nil { + m.MaxUnavailable = &intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultRuntimeClassName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UnhealthyPodEvictionPolicy", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5269,8 +1311,8 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.DefaultRuntimeClassName = &s + s := UnhealthyPodEvictionPolicyType(dAtA[iNdEx:postIndex]) + m.UnhealthyPodEvictionPolicy = &s iNdEx = postIndex default: iNdEx = preIndex @@ -5293,7 +1335,7 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { +func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5316,17 +1358,17 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SELinuxStrategyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: PodDisruptionBudgetStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SELinuxStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodDisruptionBudgetStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) } - var stringLen uint64 + m.ObservedGeneration = 0 for shift := uint(0); ; shift 
+= 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5336,27 +1378,14 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rule = SELinuxStrategy(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SELinuxOptions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DisruptedPods", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5383,68 +1412,149 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.SELinuxOptions == nil { - m.SELinuxOptions = &v11.SELinuxOptions{} + if m.DisruptedPods == nil { + m.DisruptedPods = make(map[string]v1.Time) } - if err := m.SELinuxOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var mapkey string + mapvalue := &v1.Time{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &v1.Time{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } + m.DisruptedPods[mapkey] = *mapvalue iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + case 3: + if 
wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DisruptionsAllowed", wireType) } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + m.DisruptionsAllowed = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DisruptionsAllowed |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { - return io.ErrUnexpectedEOF + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentHealthy", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.CurrentHealthy = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentHealthy |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SupplementalGroupsStrategyOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SupplementalGroupsStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredHealthy", wireType) } - var stringLen uint64 + m.DesiredHealthy = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5454,27 +1564,33 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.DesiredHealthy |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpectedPods", wireType) } - if postIndex > l { - return io.ErrUnexpectedEOF + m.ExpectedPods = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExpectedPods |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - m.Rule = SupplementalGroupsStrategyType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5501,8 +1617,8 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Conditions = append(m.Conditions, v1.Condition{}) + if err := 
m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.proto b/vendor/k8s.io/api/policy/v1beta1/generated.proto index 16301c236a..d1409913f1 100644 --- a/vendor/k8s.io/api/policy/v1beta1/generated.proto +++ b/vendor/k8s.io/api/policy/v1beta1/generated.proto @@ -21,7 +21,6 @@ syntax = "proto2"; package k8s.io.api.policy.v1beta1; -import "k8s.io/api/core/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -30,35 +29,6 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "k8s.io/api/policy/v1beta1"; -// AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used. -message AllowedCSIDriver { - // Name is the registered name of the CSI driver - optional string name = 1; -} - -// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. -message AllowedFlexVolume { - // driver is the name of the Flexvolume driver. - optional string driver = 1; -} - -// AllowedHostPath defines the host volume conditions that will be enabled by a policy -// for pods to use. It requires the path prefix to be defined. -message AllowedHostPath { - // pathPrefix is the path prefix that the host volume must match. - // It does not support `*`. - // Trailing slashes are trimmed when validating the path prefix with a host path. - // - // Examples: - // `/foo` would allow `/foo`, `/foo/` and `/foo/bar` - // `/foo` would not allow `/food` or `/etc/foo` - optional string pathPrefix = 1; - - // when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly. - // +optional - optional bool readOnly = 2; -} - // Eviction evicts a pod from its node subject to certain policies and safety constraints. // This is a subresource of Pod. A request to cause such an eviction is // created by POSTing to .../pods/<pod name>/evictions. @@ -72,37 +42,6 @@ message Eviction { optional k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions deleteOptions = 2; } -// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. -message FSGroupStrategyOptions { - // rule is the strategy that will dictate what FSGroup is used in the SecurityContext. - // +optional - optional string rule = 1; - - // ranges are the allowed ranges of fs groups. If you would like to force a single - // fs group then supply a single range with the same start and end. Required for MustRunAs. - // +optional - repeated IDRange ranges = 2; -} - -// HostPortRange defines a range of host ports that will be enabled by a policy -// for pods to use. It requires both the start and end to be defined. -message HostPortRange { - // min is the start of the range, inclusive. - optional int32 min = 1; - - // max is the end of the range, inclusive. - optional int32 max = 2; -} - -// IDRange provides a min/max of an allowed range of IDs. -message IDRange { - // min is the start of the range, inclusive. - optional int64 min = 1; - - // max is the end of the range, inclusive. - optional int64 max = 2; -} - // PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods message PodDisruptionBudget { // Standard object's metadata. 
@@ -238,219 +177,3 @@ message PodDisruptionBudgetStatus { repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 7; } -// PodSecurityPolicy governs the ability to make requests that affect the Security Context -// that will be applied to a pod and container. -// Deprecated in 1.21. -message PodSecurityPolicy { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // spec defines the policy enforced. - // +optional - optional PodSecurityPolicySpec spec = 2; -} - -// PodSecurityPolicyList is a list of PodSecurityPolicy objects. -message PodSecurityPolicyList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // items is a list of schema objects. - repeated PodSecurityPolicy items = 2; -} - -// PodSecurityPolicySpec defines the policy enforced. -message PodSecurityPolicySpec { - // privileged determines if a pod can request to be run as privileged. - // +optional - optional bool privileged = 1; - - // defaultAddCapabilities is the default set of capabilities that will be added to the container - // unless the pod spec specifically drops the capability. You may not list a capability in both - // defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly - // allowed, and need not be included in the allowedCapabilities list. - // +optional - repeated string defaultAddCapabilities = 2; - - // requiredDropCapabilities are the capabilities that will be dropped from the container. These - // are required to be dropped and cannot be added. - // +optional - repeated string requiredDropCapabilities = 3; - - // allowedCapabilities is a list of capabilities that can be requested to add to the container. - // Capabilities in this field may be added at the pod author's discretion. - // You must not list a capability in both allowedCapabilities and requiredDropCapabilities. - // +optional - repeated string allowedCapabilities = 4; - - // volumes is an allowlist of volume plugins. Empty indicates that - // no volumes may be used. To allow all volumes you may use '*'. - // +optional - repeated string volumes = 5; - - // hostNetwork determines if the policy allows the use of HostNetwork in the pod spec. - // +optional - optional bool hostNetwork = 6; - - // hostPorts determines which host port ranges are allowed to be exposed. - // +optional - repeated HostPortRange hostPorts = 7; - - // hostPID determines if the policy allows the use of HostPID in the pod spec. - // +optional - optional bool hostPID = 8; - - // hostIPC determines if the policy allows the use of HostIPC in the pod spec. - // +optional - optional bool hostIPC = 9; - - // seLinux is the strategy that will dictate the allowable labels that may be set. - optional SELinuxStrategyOptions seLinux = 10; - - // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. - optional RunAsUserStrategyOptions runAsUser = 11; - - // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. - // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the - // RunAsGroup feature gate to be enabled. 
- // +optional - optional RunAsGroupStrategyOptions runAsGroup = 22; - - // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. - optional SupplementalGroupsStrategyOptions supplementalGroups = 12; - - // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. - optional FSGroupStrategyOptions fsGroup = 13; - - // readOnlyRootFilesystem when set to true will force containers to run with a read only root file - // system. If the container specifically requests to run with a non-read only root file system - // the PSP should deny the pod. - // If set to false the container may run with a read only root file system if it wishes but it - // will not be forced to. - // +optional - optional bool readOnlyRootFilesystem = 14; - - // defaultAllowPrivilegeEscalation controls the default setting for whether a - // process can gain more privileges than its parent process. - // +optional - optional bool defaultAllowPrivilegeEscalation = 15; - - // allowPrivilegeEscalation determines if a pod can request to allow - // privilege escalation. If unspecified, defaults to true. - // +optional - optional bool allowPrivilegeEscalation = 16; - - // allowedHostPaths is an allowlist of host paths. Empty indicates - // that all host paths may be used. - // +optional - repeated AllowedHostPath allowedHostPaths = 17; - - // allowedFlexVolumes is an allowlist of Flexvolumes. Empty or nil indicates that all - // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes - // is allowed in the "volumes" field. - // +optional - repeated AllowedFlexVolume allowedFlexVolumes = 18; - - // AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. - // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. - // +optional - repeated AllowedCSIDriver allowedCSIDrivers = 23; - - // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. - // Each entry is either a plain sysctl name or ends in "*" in which case it is considered - // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. - // Kubelet has to allowlist all allowed unsafe sysctls explicitly to avoid rejection. - // - // Examples: - // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. - // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. - // +optional - repeated string allowedUnsafeSysctls = 19; - - // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. - // Each entry is either a plain sysctl name or ends in "*" in which case it is considered - // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. - // - // Examples: - // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. - // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. - // +optional - repeated string forbiddenSysctls = 20; - - // AllowedProcMountTypes is an allowlist of allowed ProcMountTypes. - // Empty or nil indicates that only the DefaultProcMountType may be used. - // This requires the ProcMountType feature flag to be enabled. - // +optional - repeated string allowedProcMountTypes = 21; - - // runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. - // If this field is omitted, the pod's runtimeClassName field is unrestricted. - // Enforcement of this field depends on the RuntimeClass feature gate being enabled. 
- // +optional - optional RuntimeClassStrategyOptions runtimeClass = 24; -} - -// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. -message RunAsGroupStrategyOptions { - // rule is the strategy that will dictate the allowable RunAsGroup values that may be set. - optional string rule = 1; - - // ranges are the allowed ranges of gids that may be used. If you would like to force a single gid - // then supply a single range with the same start and end. Required for MustRunAs. - // +optional - repeated IDRange ranges = 2; -} - -// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. -message RunAsUserStrategyOptions { - // rule is the strategy that will dictate the allowable RunAsUser values that may be set. - optional string rule = 1; - - // ranges are the allowed ranges of uids that may be used. If you would like to force a single uid - // then supply a single range with the same start and end. Required for MustRunAs. - // +optional - repeated IDRange ranges = 2; -} - -// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses -// for a pod. -message RuntimeClassStrategyOptions { - // allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod. - // A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the - // list. An empty list requires the RuntimeClassName field to be unset. - repeated string allowedRuntimeClassNames = 1; - - // defaultRuntimeClassName is the default RuntimeClassName to set on the pod. - // The default MUST be allowed by the allowedRuntimeClassNames list. - // A value of nil does not mutate the Pod. - // +optional - optional string defaultRuntimeClassName = 2; -} - -// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. -message SELinuxStrategyOptions { - // rule is the strategy that will dictate the allowable labels that may be set. - optional string rule = 1; - - // seLinuxOptions required to run as; required for MustRunAs - // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - // +optional - optional k8s.io.api.core.v1.SELinuxOptions seLinuxOptions = 2; -} - -// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. -message SupplementalGroupsStrategyOptions { - // rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. - // +optional - optional string rule = 1; - - // ranges are the allowed ranges of supplemental groups. If you would like to force a single - // supplemental group then supply a single range with the same start and end. Required for MustRunAs. 
- // +optional - repeated IDRange ranges = 2; -} - diff --git a/vendor/k8s.io/api/policy/v1beta1/register.go b/vendor/k8s.io/api/policy/v1beta1/register.go index b3efd6326b..d77f130407 100644 --- a/vendor/k8s.io/api/policy/v1beta1/register.go +++ b/vendor/k8s.io/api/policy/v1beta1/register.go @@ -46,8 +46,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &PodDisruptionBudget{}, &PodDisruptionBudgetList{}, - &PodSecurityPolicy{}, - &PodSecurityPolicyList{}, &Eviction{}, ) // Add the watch version that applies diff --git a/vendor/k8s.io/api/policy/v1beta1/types.go b/vendor/k8s.io/api/policy/v1beta1/types.go index 1e6b075e32..bc5f970d27 100644 --- a/vendor/k8s.io/api/policy/v1beta1/types.go +++ b/vendor/k8s.io/api/policy/v1beta1/types.go @@ -17,7 +17,6 @@ limitations under the License. package v1beta1 import ( - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" ) @@ -228,373 +227,3 @@ type Eviction struct { // +optional DeleteOptions *metav1.DeleteOptions `json:"deleteOptions,omitempty" protobuf:"bytes,2,opt,name=deleteOptions"` } - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.10 -// +k8s:prerelease-lifecycle-gen:deprecated=1.21 -// +k8s:prerelease-lifecycle-gen:removed=1.25 - -// PodSecurityPolicy governs the ability to make requests that affect the Security Context -// that will be applied to a pod and container. -// Deprecated in 1.21. -type PodSecurityPolicy struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // spec defines the policy enforced. - // +optional - Spec PodSecurityPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` -} - -// PodSecurityPolicySpec defines the policy enforced. -type PodSecurityPolicySpec struct { - // privileged determines if a pod can request to be run as privileged. - // +optional - Privileged bool `json:"privileged,omitempty" protobuf:"varint,1,opt,name=privileged"` - // defaultAddCapabilities is the default set of capabilities that will be added to the container - // unless the pod spec specifically drops the capability. You may not list a capability in both - // defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly - // allowed, and need not be included in the allowedCapabilities list. - // +optional - DefaultAddCapabilities []v1.Capability `json:"defaultAddCapabilities,omitempty" protobuf:"bytes,2,rep,name=defaultAddCapabilities,casttype=k8s.io/api/core/v1.Capability"` - // requiredDropCapabilities are the capabilities that will be dropped from the container. These - // are required to be dropped and cannot be added. - // +optional - RequiredDropCapabilities []v1.Capability `json:"requiredDropCapabilities,omitempty" protobuf:"bytes,3,rep,name=requiredDropCapabilities,casttype=k8s.io/api/core/v1.Capability"` - // allowedCapabilities is a list of capabilities that can be requested to add to the container. - // Capabilities in this field may be added at the pod author's discretion. - // You must not list a capability in both allowedCapabilities and requiredDropCapabilities. 
- // +optional - AllowedCapabilities []v1.Capability `json:"allowedCapabilities,omitempty" protobuf:"bytes,4,rep,name=allowedCapabilities,casttype=k8s.io/api/core/v1.Capability"` - // volumes is an allowlist of volume plugins. Empty indicates that - // no volumes may be used. To allow all volumes you may use '*'. - // +optional - Volumes []FSType `json:"volumes,omitempty" protobuf:"bytes,5,rep,name=volumes,casttype=FSType"` - // hostNetwork determines if the policy allows the use of HostNetwork in the pod spec. - // +optional - HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,6,opt,name=hostNetwork"` - // hostPorts determines which host port ranges are allowed to be exposed. - // +optional - HostPorts []HostPortRange `json:"hostPorts,omitempty" protobuf:"bytes,7,rep,name=hostPorts"` - // hostPID determines if the policy allows the use of HostPID in the pod spec. - // +optional - HostPID bool `json:"hostPID,omitempty" protobuf:"varint,8,opt,name=hostPID"` - // hostIPC determines if the policy allows the use of HostIPC in the pod spec. - // +optional - HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,9,opt,name=hostIPC"` - // seLinux is the strategy that will dictate the allowable labels that may be set. - SELinux SELinuxStrategyOptions `json:"seLinux" protobuf:"bytes,10,opt,name=seLinux"` - // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. - RunAsUser RunAsUserStrategyOptions `json:"runAsUser" protobuf:"bytes,11,opt,name=runAsUser"` - // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. - // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the - // RunAsGroup feature gate to be enabled. - // +optional - RunAsGroup *RunAsGroupStrategyOptions `json:"runAsGroup,omitempty" protobuf:"bytes,22,opt,name=runAsGroup"` - // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. - SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups" protobuf:"bytes,12,opt,name=supplementalGroups"` - // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. - FSGroup FSGroupStrategyOptions `json:"fsGroup" protobuf:"bytes,13,opt,name=fsGroup"` - // readOnlyRootFilesystem when set to true will force containers to run with a read only root file - // system. If the container specifically requests to run with a non-read only root file system - // the PSP should deny the pod. - // If set to false the container may run with a read only root file system if it wishes but it - // will not be forced to. - // +optional - ReadOnlyRootFilesystem bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,14,opt,name=readOnlyRootFilesystem"` - // defaultAllowPrivilegeEscalation controls the default setting for whether a - // process can gain more privileges than its parent process. - // +optional - DefaultAllowPrivilegeEscalation *bool `json:"defaultAllowPrivilegeEscalation,omitempty" protobuf:"varint,15,opt,name=defaultAllowPrivilegeEscalation"` - // allowPrivilegeEscalation determines if a pod can request to allow - // privilege escalation. If unspecified, defaults to true. - // +optional - AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,16,opt,name=allowPrivilegeEscalation"` - // allowedHostPaths is an allowlist of host paths. Empty indicates - // that all host paths may be used. 
- // +optional - AllowedHostPaths []AllowedHostPath `json:"allowedHostPaths,omitempty" protobuf:"bytes,17,rep,name=allowedHostPaths"` - // allowedFlexVolumes is an allowlist of Flexvolumes. Empty or nil indicates that all - // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes - // is allowed in the "volumes" field. - // +optional - AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,18,rep,name=allowedFlexVolumes"` - // AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. - // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. - // +optional - AllowedCSIDrivers []AllowedCSIDriver `json:"allowedCSIDrivers,omitempty" protobuf:"bytes,23,rep,name=allowedCSIDrivers"` - // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. - // Each entry is either a plain sysctl name or ends in "*" in which case it is considered - // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. - // Kubelet has to allowlist all allowed unsafe sysctls explicitly to avoid rejection. - // - // Examples: - // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. - // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. - // +optional - AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty" protobuf:"bytes,19,rep,name=allowedUnsafeSysctls"` - // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. - // Each entry is either a plain sysctl name or ends in "*" in which case it is considered - // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. - // - // Examples: - // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. - // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. - // +optional - ForbiddenSysctls []string `json:"forbiddenSysctls,omitempty" protobuf:"bytes,20,rep,name=forbiddenSysctls"` - // AllowedProcMountTypes is an allowlist of allowed ProcMountTypes. - // Empty or nil indicates that only the DefaultProcMountType may be used. - // This requires the ProcMountType feature flag to be enabled. - // +optional - AllowedProcMountTypes []v1.ProcMountType `json:"allowedProcMountTypes,omitempty" protobuf:"bytes,21,opt,name=allowedProcMountTypes"` - // runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. - // If this field is omitted, the pod's runtimeClassName field is unrestricted. - // Enforcement of this field depends on the RuntimeClass feature gate being enabled. - // +optional - RuntimeClass *RuntimeClassStrategyOptions `json:"runtimeClass,omitempty" protobuf:"bytes,24,opt,name=runtimeClass"` -} - -// AllowedHostPath defines the host volume conditions that will be enabled by a policy -// for pods to use. It requires the path prefix to be defined. -type AllowedHostPath struct { - // pathPrefix is the path prefix that the host volume must match. - // It does not support `*`. - // Trailing slashes are trimmed when validating the path prefix with a host path. - // - // Examples: - // `/foo` would allow `/foo`, `/foo/` and `/foo/bar` - // `/foo` would not allow `/food` or `/etc/foo` - PathPrefix string `json:"pathPrefix,omitempty" protobuf:"bytes,1,rep,name=pathPrefix"` - - // when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly. 
- // +optional - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` -} - -// AllowAllCapabilities can be used as a value for the PodSecurityPolicy.AllowAllCapabilities -// field and means that any capabilities are allowed to be requested. -var AllowAllCapabilities v1.Capability = "*" - -// FSType gives strong typing to different file systems that are used by volumes. -type FSType string - -const ( - AzureFile FSType = "azureFile" - Flocker FSType = "flocker" - FlexVolume FSType = "flexVolume" - HostPath FSType = "hostPath" - EmptyDir FSType = "emptyDir" - GCEPersistentDisk FSType = "gcePersistentDisk" - AWSElasticBlockStore FSType = "awsElasticBlockStore" - GitRepo FSType = "gitRepo" - Secret FSType = "secret" - NFS FSType = "nfs" - ISCSI FSType = "iscsi" - Glusterfs FSType = "glusterfs" - PersistentVolumeClaim FSType = "persistentVolumeClaim" - RBD FSType = "rbd" - Cinder FSType = "cinder" - CephFS FSType = "cephFS" - DownwardAPI FSType = "downwardAPI" - FC FSType = "fc" - ConfigMap FSType = "configMap" - VsphereVolume FSType = "vsphereVolume" - Quobyte FSType = "quobyte" - AzureDisk FSType = "azureDisk" - PhotonPersistentDisk FSType = "photonPersistentDisk" - StorageOS FSType = "storageos" - Projected FSType = "projected" - PortworxVolume FSType = "portworxVolume" - ScaleIO FSType = "scaleIO" - CSI FSType = "csi" - Ephemeral FSType = "ephemeral" - All FSType = "*" -) - -// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. -type AllowedFlexVolume struct { - // driver is the name of the Flexvolume driver. - Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` -} - -// AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used. -type AllowedCSIDriver struct { - // Name is the registered name of the CSI driver - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` -} - -// HostPortRange defines a range of host ports that will be enabled by a policy -// for pods to use. It requires both the start and end to be defined. -type HostPortRange struct { - // min is the start of the range, inclusive. - Min int32 `json:"min" protobuf:"varint,1,opt,name=min"` - // max is the end of the range, inclusive. - Max int32 `json:"max" protobuf:"varint,2,opt,name=max"` -} - -// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. -type SELinuxStrategyOptions struct { - // rule is the strategy that will dictate the allowable labels that may be set. - Rule SELinuxStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=SELinuxStrategy"` - // seLinuxOptions required to run as; required for MustRunAs - // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - // +optional - SELinuxOptions *v1.SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,2,opt,name=seLinuxOptions"` -} - -// SELinuxStrategy denotes strategy types for generating SELinux options for a -// Security Context. -type SELinuxStrategy string - -const ( - // SELinuxStrategyMustRunAs means that container must have SELinux labels of X applied. - SELinuxStrategyMustRunAs SELinuxStrategy = "MustRunAs" - // SELinuxStrategyRunAsAny means that container may make requests for any SELinux context labels. - SELinuxStrategyRunAsAny SELinuxStrategy = "RunAsAny" -) - -// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. 
-type RunAsUserStrategyOptions struct { - // rule is the strategy that will dictate the allowable RunAsUser values that may be set. - Rule RunAsUserStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsUserStrategy"` - // ranges are the allowed ranges of uids that may be used. If you would like to force a single uid - // then supply a single range with the same start and end. Required for MustRunAs. - // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. -type RunAsGroupStrategyOptions struct { - // rule is the strategy that will dictate the allowable RunAsGroup values that may be set. - Rule RunAsGroupStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsGroupStrategy"` - // ranges are the allowed ranges of gids that may be used. If you would like to force a single gid - // then supply a single range with the same start and end. Required for MustRunAs. - // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// IDRange provides a min/max of an allowed range of IDs. -type IDRange struct { - // min is the start of the range, inclusive. - Min int64 `json:"min" protobuf:"varint,1,opt,name=min"` - // max is the end of the range, inclusive. - Max int64 `json:"max" protobuf:"varint,2,opt,name=max"` -} - -// RunAsUserStrategy denotes strategy types for generating RunAsUser values for a -// Security Context. -type RunAsUserStrategy string - -const ( - // RunAsUserStrategyMustRunAs means that container must run as a particular uid. - RunAsUserStrategyMustRunAs RunAsUserStrategy = "MustRunAs" - // RunAsUserStrategyMustRunAsNonRoot means that container must run as a non-root uid. - RunAsUserStrategyMustRunAsNonRoot RunAsUserStrategy = "MustRunAsNonRoot" - // RunAsUserStrategyRunAsAny means that container may make requests for any uid. - RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny" -) - -// RunAsGroupStrategy denotes strategy types for generating RunAsGroup values for a -// Security Context. -type RunAsGroupStrategy string - -const ( - // RunAsGroupStrategyMayRunAs means that container does not need to run with a particular gid. - // However, when RunAsGroup are specified, they have to fall in the defined range. - RunAsGroupStrategyMayRunAs RunAsGroupStrategy = "MayRunAs" - // RunAsGroupStrategyMustRunAs means that container must run as a particular gid. - RunAsGroupStrategyMustRunAs RunAsGroupStrategy = "MustRunAs" - // RunAsUserStrategyRunAsAny means that container may make requests for any gid. - RunAsGroupStrategyRunAsAny RunAsGroupStrategy = "RunAsAny" -) - -// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. -type FSGroupStrategyOptions struct { - // rule is the strategy that will dictate what FSGroup is used in the SecurityContext. - // +optional - Rule FSGroupStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=FSGroupStrategyType"` - // ranges are the allowed ranges of fs groups. If you would like to force a single - // fs group then supply a single range with the same start and end. Required for MustRunAs. 
- // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// FSGroupStrategyType denotes strategy types for generating FSGroup values for a -// SecurityContext -type FSGroupStrategyType string - -const ( - // FSGroupStrategyMayRunAs means that container does not need to have FSGroup of X applied. - // However, when FSGroups are specified, they have to fall in the defined range. - FSGroupStrategyMayRunAs FSGroupStrategyType = "MayRunAs" - // FSGroupStrategyMustRunAs meant that container must have FSGroup of X applied. - FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs" - // FSGroupStrategyRunAsAny means that container may make requests for any FSGroup labels. - FSGroupStrategyRunAsAny FSGroupStrategyType = "RunAsAny" -) - -// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. -type SupplementalGroupsStrategyOptions struct { - // rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. - // +optional - Rule SupplementalGroupsStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=SupplementalGroupsStrategyType"` - // ranges are the allowed ranges of supplemental groups. If you would like to force a single - // supplemental group then supply a single range with the same start and end. Required for MustRunAs. - // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// SupplementalGroupsStrategyType denotes strategy types for determining valid supplemental -// groups for a SecurityContext. -type SupplementalGroupsStrategyType string - -const ( - // SupplementalGroupsStrategyMayRunAs means that container does not need to run with a particular gid. - // However, when gids are specified, they have to fall in the defined range. - SupplementalGroupsStrategyMayRunAs SupplementalGroupsStrategyType = "MayRunAs" - // SupplementalGroupsStrategyMustRunAs means that container must run as a particular gid. - SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs" - // SupplementalGroupsStrategyRunAsAny means that container may make requests for any gid. - SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny" -) - -// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses -// for a pod. -type RuntimeClassStrategyOptions struct { - // allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod. - // A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the - // list. An empty list requires the RuntimeClassName field to be unset. - AllowedRuntimeClassNames []string `json:"allowedRuntimeClassNames" protobuf:"bytes,1,rep,name=allowedRuntimeClassNames"` - // defaultRuntimeClassName is the default RuntimeClassName to set on the pod. - // The default MUST be allowed by the allowedRuntimeClassNames list. - // A value of nil does not mutate the Pod. - // +optional - DefaultRuntimeClassName *string `json:"defaultRuntimeClassName,omitempty" protobuf:"bytes,2,opt,name=defaultRuntimeClassName"` -} - -// AllowAllRuntimeClassNames can be used as a value for the -// RuntimeClassStrategyOptions.AllowedRuntimeClassNames field and means that any RuntimeClassName is -// allowed. 
-const AllowAllRuntimeClassNames = "*" - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.10 -// +k8s:prerelease-lifecycle-gen:deprecated=1.21 -// +k8s:prerelease-lifecycle-gen:removed=1.25 - -// PodSecurityPolicyList is a list of PodSecurityPolicy objects. -type PodSecurityPolicyList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // items is a list of schema objects. - Items []PodSecurityPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go index 266a9a853a..4a79d75949 100644 --- a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go @@ -27,34 +27,6 @@ package v1beta1 // Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_AllowedCSIDriver = map[string]string{ - "": "AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used.", - "name": "Name is the registered name of the CSI driver", -} - -func (AllowedCSIDriver) SwaggerDoc() map[string]string { - return map_AllowedCSIDriver -} - -var map_AllowedFlexVolume = map[string]string{ - "": "AllowedFlexVolume represents a single Flexvolume that is allowed to be used.", - "driver": "driver is the name of the Flexvolume driver.", -} - -func (AllowedFlexVolume) SwaggerDoc() map[string]string { - return map_AllowedFlexVolume -} - -var map_AllowedHostPath = map[string]string{ - "": "AllowedHostPath defines the host volume conditions that will be enabled by a policy for pods to use. It requires the path prefix to be defined.", - "pathPrefix": "pathPrefix is the path prefix that the host volume must match. It does not support `*`. Trailing slashes are trimmed when validating the path prefix with a host path.\n\nExamples: `/foo` would allow `/foo`, `/foo/` and `/foo/bar` `/foo` would not allow `/food` or `/etc/foo`", - "readOnly": "when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly.", -} - -func (AllowedHostPath) SwaggerDoc() map[string]string { - return map_AllowedHostPath -} - var map_Eviction = map[string]string{ "": "Eviction evicts a pod from its node subject to certain policies and safety constraints. This is a subresource of Pod. A request to cause such an eviction is created by POSTing to .../pods/<pod name>/evictions.", "metadata": "ObjectMeta describes the pod that is being evicted.", @@ -65,36 +37,6 @@ func (Eviction) SwaggerDoc() map[string]string { return map_Eviction } -var map_FSGroupStrategyOptions = map[string]string{ - "": "FSGroupStrategyOptions defines the strategy type and options used to create the strategy.", - "rule": "rule is the strategy that will dictate what FSGroup is used in the SecurityContext.", - "ranges": "ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end. 
Required for MustRunAs.", -} - -func (FSGroupStrategyOptions) SwaggerDoc() map[string]string { - return map_FSGroupStrategyOptions -} - -var map_HostPortRange = map[string]string{ - "": "HostPortRange defines a range of host ports that will be enabled by a policy for pods to use. It requires both the start and end to be defined.", - "min": "min is the start of the range, inclusive.", - "max": "max is the end of the range, inclusive.", -} - -func (HostPortRange) SwaggerDoc() map[string]string { - return map_HostPortRange -} - -var map_IDRange = map[string]string{ - "": "IDRange provides a min/max of an allowed range of IDs.", - "min": "min is the start of the range, inclusive.", - "max": "max is the end of the range, inclusive.", -} - -func (IDRange) SwaggerDoc() map[string]string { - return map_IDRange -} - var map_PodDisruptionBudget = map[string]string{ "": "PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -143,106 +85,4 @@ func (PodDisruptionBudgetStatus) SwaggerDoc() map[string]string { return map_PodDisruptionBudgetStatus } -var map_PodSecurityPolicy = map[string]string{ - "": "PodSecurityPolicy governs the ability to make requests that affect the Security Context that will be applied to a pod and container. Deprecated in 1.21.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "spec defines the policy enforced.", -} - -func (PodSecurityPolicy) SwaggerDoc() map[string]string { - return map_PodSecurityPolicy -} - -var map_PodSecurityPolicyList = map[string]string{ - "": "PodSecurityPolicyList is a list of PodSecurityPolicy objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "items is a list of schema objects.", -} - -func (PodSecurityPolicyList) SwaggerDoc() map[string]string { - return map_PodSecurityPolicyList -} - -var map_PodSecurityPolicySpec = map[string]string{ - "": "PodSecurityPolicySpec defines the policy enforced.", - "privileged": "privileged determines if a pod can request to be run as privileged.", - "defaultAddCapabilities": "defaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capability in both defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly allowed, and need not be included in the allowedCapabilities list.", - "requiredDropCapabilities": "requiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added.", - "allowedCapabilities": "allowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both allowedCapabilities and requiredDropCapabilities.", - "volumes": "volumes is an allowlist of volume plugins. Empty indicates that no volumes may be used. 
To allow all volumes you may use '*'.", - "hostNetwork": "hostNetwork determines if the policy allows the use of HostNetwork in the pod spec.", - "hostPorts": "hostPorts determines which host port ranges are allowed to be exposed.", - "hostPID": "hostPID determines if the policy allows the use of HostPID in the pod spec.", - "hostIPC": "hostIPC determines if the policy allows the use of HostIPC in the pod spec.", - "seLinux": "seLinux is the strategy that will dictate the allowable labels that may be set.", - "runAsUser": "runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.", - "runAsGroup": "RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. If this field is omitted, the pod's RunAsGroup can take any value. This field requires the RunAsGroup feature gate to be enabled.", - "supplementalGroups": "supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.", - "fsGroup": "fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.", - "readOnlyRootFilesystem": "readOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.", - "defaultAllowPrivilegeEscalation": "defaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process.", - "allowPrivilegeEscalation": "allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.", - "allowedHostPaths": "allowedHostPaths is an allowlist of host paths. Empty indicates that all host paths may be used.", - "allowedFlexVolumes": "allowedFlexVolumes is an allowlist of Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"volumes\" field.", - "allowedCSIDrivers": "AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value indicates that any CSI driver can be used for inline ephemeral volumes.", - "allowedUnsafeSysctls": "allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to allowlist all allowed unsafe sysctls explicitly to avoid rejection.\n\nExamples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc.", - "forbiddenSysctls": "forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\n\nExamples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc.", - "allowedProcMountTypes": "AllowedProcMountTypes is an allowlist of allowed ProcMountTypes. Empty or nil indicates that only the DefaultProcMountType may be used. 
This requires the ProcMountType feature flag to be enabled.", - "runtimeClass": "runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. If this field is omitted, the pod's runtimeClassName field is unrestricted. Enforcement of this field depends on the RuntimeClass feature gate being enabled.", -} - -func (PodSecurityPolicySpec) SwaggerDoc() map[string]string { - return map_PodSecurityPolicySpec -} - -var map_RunAsGroupStrategyOptions = map[string]string{ - "": "RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy.", - "rule": "rule is the strategy that will dictate the allowable RunAsGroup values that may be set.", - "ranges": "ranges are the allowed ranges of gids that may be used. If you would like to force a single gid then supply a single range with the same start and end. Required for MustRunAs.", -} - -func (RunAsGroupStrategyOptions) SwaggerDoc() map[string]string { - return map_RunAsGroupStrategyOptions -} - -var map_RunAsUserStrategyOptions = map[string]string{ - "": "RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy.", - "rule": "rule is the strategy that will dictate the allowable RunAsUser values that may be set.", - "ranges": "ranges are the allowed ranges of uids that may be used. If you would like to force a single uid then supply a single range with the same start and end. Required for MustRunAs.", -} - -func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string { - return map_RunAsUserStrategyOptions -} - -var map_RuntimeClassStrategyOptions = map[string]string{ - "": "RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses for a pod.", - "allowedRuntimeClassNames": "allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod. A value of \"*\" means that any RuntimeClass name is allowed, and must be the only item in the list. An empty list requires the RuntimeClassName field to be unset.", - "defaultRuntimeClassName": "defaultRuntimeClassName is the default RuntimeClassName to set on the pod. The default MUST be allowed by the allowedRuntimeClassNames list. A value of nil does not mutate the Pod.", -} - -func (RuntimeClassStrategyOptions) SwaggerDoc() map[string]string { - return map_RuntimeClassStrategyOptions -} - -var map_SELinuxStrategyOptions = map[string]string{ - "": "SELinuxStrategyOptions defines the strategy type and any options used to create the strategy.", - "rule": "rule is the strategy that will dictate the allowable labels that may be set.", - "seLinuxOptions": "seLinuxOptions required to run as; required for MustRunAs More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", -} - -func (SELinuxStrategyOptions) SwaggerDoc() map[string]string { - return map_SELinuxStrategyOptions -} - -var map_SupplementalGroupsStrategyOptions = map[string]string{ - "": "SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy.", - "rule": "rule is the strategy that will dictate what supplemental groups is used in the SecurityContext.", - "ranges": "ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end. 
Required for MustRunAs.", -} - -func (SupplementalGroupsStrategyOptions) SwaggerDoc() map[string]string { - return map_SupplementalGroupsStrategyOptions -} - // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go index 8602d1adca..51895ffdb9 100644 --- a/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go @@ -22,60 +22,11 @@ limitations under the License. package v1beta1 import ( - corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AllowedCSIDriver) DeepCopyInto(out *AllowedCSIDriver) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedCSIDriver. -func (in *AllowedCSIDriver) DeepCopy() *AllowedCSIDriver { - if in == nil { - return nil - } - out := new(AllowedCSIDriver) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedFlexVolume. -func (in *AllowedFlexVolume) DeepCopy() *AllowedFlexVolume { - if in == nil { - return nil - } - out := new(AllowedFlexVolume) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AllowedHostPath) DeepCopyInto(out *AllowedHostPath) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedHostPath. -func (in *AllowedHostPath) DeepCopy() *AllowedHostPath { - if in == nil { - return nil - } - out := new(AllowedHostPath) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Eviction) DeepCopyInto(out *Eviction) { *out = *in @@ -107,59 +58,6 @@ func (in *Eviction) DeepCopyObject() runtime.Object { return nil } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FSGroupStrategyOptions) DeepCopyInto(out *FSGroupStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FSGroupStrategyOptions. -func (in *FSGroupStrategyOptions) DeepCopy() *FSGroupStrategyOptions { - if in == nil { - return nil - } - out := new(FSGroupStrategyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HostPortRange) DeepCopyInto(out *HostPortRange) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPortRange. 
-func (in *HostPortRange) DeepCopy() *HostPortRange { - if in == nil { - return nil - } - out := new(HostPortRange) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IDRange) DeepCopyInto(out *IDRange) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IDRange. -func (in *IDRange) DeepCopy() *IDRange { - if in == nil { - return nil - } - out := new(IDRange) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PodDisruptionBudget) DeepCopyInto(out *PodDisruptionBudget) { *out = *in @@ -286,268 +184,3 @@ func (in *PodDisruptionBudgetStatus) DeepCopy() *PodDisruptionBudgetStatus { in.DeepCopyInto(out) return out } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSecurityPolicy) DeepCopyInto(out *PodSecurityPolicy) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicy. -func (in *PodSecurityPolicy) DeepCopy() *PodSecurityPolicy { - if in == nil { - return nil - } - out := new(PodSecurityPolicy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodSecurityPolicy) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSecurityPolicyList) DeepCopyInto(out *PodSecurityPolicyList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PodSecurityPolicy, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicyList. -func (in *PodSecurityPolicyList) DeepCopy() *PodSecurityPolicyList { - if in == nil { - return nil - } - out := new(PodSecurityPolicyList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodSecurityPolicyList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { - *out = *in - if in.DefaultAddCapabilities != nil { - in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities - *out = make([]corev1.Capability, len(*in)) - copy(*out, *in) - } - if in.RequiredDropCapabilities != nil { - in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities - *out = make([]corev1.Capability, len(*in)) - copy(*out, *in) - } - if in.AllowedCapabilities != nil { - in, out := &in.AllowedCapabilities, &out.AllowedCapabilities - *out = make([]corev1.Capability, len(*in)) - copy(*out, *in) - } - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make([]FSType, len(*in)) - copy(*out, *in) - } - if in.HostPorts != nil { - in, out := &in.HostPorts, &out.HostPorts - *out = make([]HostPortRange, len(*in)) - copy(*out, *in) - } - in.SELinux.DeepCopyInto(&out.SELinux) - in.RunAsUser.DeepCopyInto(&out.RunAsUser) - if in.RunAsGroup != nil { - in, out := &in.RunAsGroup, &out.RunAsGroup - *out = new(RunAsGroupStrategyOptions) - (*in).DeepCopyInto(*out) - } - in.SupplementalGroups.DeepCopyInto(&out.SupplementalGroups) - in.FSGroup.DeepCopyInto(&out.FSGroup) - if in.DefaultAllowPrivilegeEscalation != nil { - in, out := &in.DefaultAllowPrivilegeEscalation, &out.DefaultAllowPrivilegeEscalation - *out = new(bool) - **out = **in - } - if in.AllowPrivilegeEscalation != nil { - in, out := &in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation - *out = new(bool) - **out = **in - } - if in.AllowedHostPaths != nil { - in, out := &in.AllowedHostPaths, &out.AllowedHostPaths - *out = make([]AllowedHostPath, len(*in)) - copy(*out, *in) - } - if in.AllowedFlexVolumes != nil { - in, out := &in.AllowedFlexVolumes, &out.AllowedFlexVolumes - *out = make([]AllowedFlexVolume, len(*in)) - copy(*out, *in) - } - if in.AllowedCSIDrivers != nil { - in, out := &in.AllowedCSIDrivers, &out.AllowedCSIDrivers - *out = make([]AllowedCSIDriver, len(*in)) - copy(*out, *in) - } - if in.AllowedUnsafeSysctls != nil { - in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ForbiddenSysctls != nil { - in, out := &in.ForbiddenSysctls, &out.ForbiddenSysctls - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.AllowedProcMountTypes != nil { - in, out := &in.AllowedProcMountTypes, &out.AllowedProcMountTypes - *out = make([]corev1.ProcMountType, len(*in)) - copy(*out, *in) - } - if in.RuntimeClass != nil { - in, out := &in.RuntimeClass, &out.RuntimeClass - *out = new(RuntimeClassStrategyOptions) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicySpec. -func (in *PodSecurityPolicySpec) DeepCopy() *PodSecurityPolicySpec { - if in == nil { - return nil - } - out := new(PodSecurityPolicySpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RunAsGroupStrategyOptions) DeepCopyInto(out *RunAsGroupStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsGroupStrategyOptions. 
-func (in *RunAsGroupStrategyOptions) DeepCopy() *RunAsGroupStrategyOptions { - if in == nil { - return nil - } - out := new(RunAsGroupStrategyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RunAsUserStrategyOptions) DeepCopyInto(out *RunAsUserStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsUserStrategyOptions. -func (in *RunAsUserStrategyOptions) DeepCopy() *RunAsUserStrategyOptions { - if in == nil { - return nil - } - out := new(RunAsUserStrategyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RuntimeClassStrategyOptions) DeepCopyInto(out *RuntimeClassStrategyOptions) { - *out = *in - if in.AllowedRuntimeClassNames != nil { - in, out := &in.AllowedRuntimeClassNames, &out.AllowedRuntimeClassNames - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.DefaultRuntimeClassName != nil { - in, out := &in.DefaultRuntimeClassName, &out.DefaultRuntimeClassName - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassStrategyOptions. -func (in *RuntimeClassStrategyOptions) DeepCopy() *RuntimeClassStrategyOptions { - if in == nil { - return nil - } - out := new(RuntimeClassStrategyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SELinuxStrategyOptions) DeepCopyInto(out *SELinuxStrategyOptions) { - *out = *in - if in.SELinuxOptions != nil { - in, out := &in.SELinuxOptions, &out.SELinuxOptions - *out = new(corev1.SELinuxOptions) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SELinuxStrategyOptions. -func (in *SELinuxStrategyOptions) DeepCopy() *SELinuxStrategyOptions { - if in == nil { - return nil - } - out := new(SELinuxStrategyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SupplementalGroupsStrategyOptions) DeepCopyInto(out *SupplementalGroupsStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SupplementalGroupsStrategyOptions. 
-func (in *SupplementalGroupsStrategyOptions) DeepCopy() *SupplementalGroupsStrategyOptions { - if in == nil { - return nil - } - out := new(SupplementalGroupsStrategyOptions) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/policy/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/policy/v1beta1/zz_generated.prerelease-lifecycle.go index 612061d6cf..765a71e472 100644 --- a/vendor/k8s.io/api/policy/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/policy/v1beta1/zz_generated.prerelease-lifecycle.go @@ -90,39 +90,3 @@ func (in *PodDisruptionBudgetList) APILifecycleReplacement() schema.GroupVersion func (in *PodDisruptionBudgetList) APILifecycleRemoved() (major, minor int) { return 1, 25 } - -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *PodSecurityPolicy) APILifecycleIntroduced() (major, minor int) { - return 1, 10 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *PodSecurityPolicy) APILifecycleDeprecated() (major, minor int) { - return 1, 21 -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *PodSecurityPolicy) APILifecycleRemoved() (major, minor int) { - return 1, 25 -} - -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *PodSecurityPolicyList) APILifecycleIntroduced() (major, minor int) { - return 1, 10 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *PodSecurityPolicyList) APILifecycleDeprecated() (major, minor int) { - return 1, 21 -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *PodSecurityPolicyList) APILifecycleRemoved() (major, minor int) { - return 1, 25 -} diff --git a/vendor/k8s.io/api/resource/v1alpha2/generated.proto b/vendor/k8s.io/api/resource/v1alpha2/generated.proto index 02412398c4..f7748f9a1a 100644 --- a/vendor/k8s.io/api/resource/v1alpha2/generated.proto +++ b/vendor/k8s.io/api/resource/v1alpha2/generated.proto @@ -107,7 +107,7 @@ message PodSchedulingContextSpec { // that suits all pending resources. This may get increased in the // future, but not reduced. 
// - // +listType=set + // +listType=atomic // +optional repeated string potentialNodes = 2; } @@ -208,7 +208,7 @@ message ResourceClaimSchedulingStatus { // PodSchedulingSpec.PotentialNodes. This may get increased in the // future, but not reduced. // - // +listType=set + // +listType=atomic // +optional repeated string unsuitableNodes = 2; } diff --git a/vendor/k8s.io/api/resource/v1alpha2/types.go b/vendor/k8s.io/api/resource/v1alpha2/types.go index 21936bfe3d..a614ff9dc1 100644 --- a/vendor/k8s.io/api/resource/v1alpha2/types.go +++ b/vendor/k8s.io/api/resource/v1alpha2/types.go @@ -248,7 +248,7 @@ type PodSchedulingContextSpec struct { // that suits all pending resources. This may get increased in the // future, but not reduced. // - // +listType=set + // +listType=atomic // +optional PotentialNodes []string `json:"potentialNodes,omitempty" protobuf:"bytes,2,opt,name=potentialNodes"` } @@ -283,7 +283,7 @@ type ResourceClaimSchedulingStatus struct { // PodSchedulingSpec.PotentialNodes. This may get increased in the // future, but not reduced. // - // +listType=set + // +listType=atomic // +optional UnsuitableNodes []string `json:"unsuitableNodes,omitempty" protobuf:"bytes,2,opt,name=unsuitableNodes"` } diff --git a/vendor/k8s.io/api/storage/v1/generated.proto b/vendor/k8s.io/api/storage/v1/generated.proto index 5f8eccaefc..b35f708c66 100644 --- a/vendor/k8s.io/api/storage/v1/generated.proto +++ b/vendor/k8s.io/api/storage/v1/generated.proto @@ -88,7 +88,7 @@ message CSIDriverSpec { // If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. // The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. // - // The following VolumeConext will be passed if podInfoOnMount is set to true. + // The following VolumeContext will be passed if podInfoOnMount is set to true. // This list might grow, but the prefix will be used. // "csi.storage.k8s.io/pod.name": pod.Name // "csi.storage.k8s.io/pod.namespace": pod.Namespace diff --git a/vendor/k8s.io/api/storage/v1/types.go b/vendor/k8s.io/api/storage/v1/types.go index c785f368ef..7d7b7664b8 100644 --- a/vendor/k8s.io/api/storage/v1/types.go +++ b/vendor/k8s.io/api/storage/v1/types.go @@ -291,7 +291,7 @@ type CSIDriverSpec struct { // If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. // The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. // - // The following VolumeConext will be passed if podInfoOnMount is set to true. + // The following VolumeContext will be passed if podInfoOnMount is set to true. // This list might grow, but the prefix will be used. 
// "csi.storage.k8s.io/pod.name": pod.Name // "csi.storage.k8s.io/pod.namespace": pod.Namespace diff --git a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go index c92a7f95a2..69ee683610 100644 --- a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go @@ -50,7 +50,7 @@ func (CSIDriverList) SwaggerDoc() map[string]string { var map_CSIDriverSpec = map[string]string{ "": "CSIDriverSpec is the specification of a CSIDriver.", "attachRequired": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.\n\nThis field is immutable.", - "podInfoOnMount": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", + "podInfoOnMount": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeContext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. 
\"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", "volumeLifecycleModes": "volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism.\n\nThe other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume.\n\nFor more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\n\nThis field is beta. This field is immutable.", "storageCapacity": "storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information, if set to true.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field was immutable in Kubernetes <= 1.22 and now is mutable.", "fsGroupPolicy": "fsGroupPolicy defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. 
With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go b/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go index 1f3f380108..2b15ec3feb 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go @@ -243,10 +243,66 @@ func (m *VolumeAttachmentStatus) XXX_DiscardUnknown() { var xxx_messageInfo_VolumeAttachmentStatus proto.InternalMessageInfo +func (m *VolumeAttributesClass) Reset() { *m = VolumeAttributesClass{} } +func (*VolumeAttributesClass) ProtoMessage() {} +func (*VolumeAttributesClass) Descriptor() ([]byte, []int) { + return fileDescriptor_10f856db1e670dc4, []int{7} +} +func (m *VolumeAttributesClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttributesClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttributesClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttributesClass.Merge(m, src) +} +func (m *VolumeAttributesClass) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttributesClass) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttributesClass.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttributesClass proto.InternalMessageInfo + +func (m *VolumeAttributesClassList) Reset() { *m = VolumeAttributesClassList{} } +func (*VolumeAttributesClassList) ProtoMessage() {} +func (*VolumeAttributesClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_10f856db1e670dc4, []int{8} +} +func (m *VolumeAttributesClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttributesClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttributesClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttributesClassList.Merge(m, src) +} +func (m *VolumeAttributesClassList) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttributesClassList) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttributesClassList.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttributesClassList proto.InternalMessageInfo + func (m *VolumeError) Reset() { *m = VolumeError{} } func (*VolumeError) ProtoMessage() {} func (*VolumeError) Descriptor() ([]byte, []int) { - return fileDescriptor_10f856db1e670dc4, []int{7} + return fileDescriptor_10f856db1e670dc4, []int{9} } func (m *VolumeError) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -280,6 +336,9 @@ func init() { proto.RegisterType((*VolumeAttachmentSpec)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentSpec") proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentStatus") proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentStatus.AttachmentMetadataEntry") + proto.RegisterType((*VolumeAttributesClass)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttributesClass") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttributesClass.ParametersEntry") + proto.RegisterType((*VolumeAttributesClassList)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttributesClassList") proto.RegisterType((*VolumeError)(nil), 
"k8s.io.api.storage.v1alpha1.VolumeError") } @@ -288,65 +347,71 @@ func init() { } var fileDescriptor_10f856db1e670dc4 = []byte{ - // 925 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x3f, 0x6f, 0x23, 0x45, - 0x14, 0xf7, 0xc6, 0xce, 0x9d, 0x6f, 0x1c, 0xc0, 0x37, 0x32, 0x87, 0xe5, 0x93, 0xd6, 0x91, 0x2b, - 0x83, 0xb8, 0x59, 0x72, 0x20, 0x74, 0xa2, 0xf3, 0x26, 0x29, 0x22, 0x92, 0x00, 0xe3, 0x08, 0x21, - 0xa0, 0x60, 0xbc, 0x7e, 0xd8, 0x13, 0x7b, 0xff, 0x68, 0x67, 0x36, 0xc2, 0x54, 0x54, 0xd4, 0x74, - 0x7c, 0x03, 0x3e, 0x4b, 0x0a, 0x24, 0x4e, 0x54, 0x57, 0x59, 0x64, 0xf9, 0x0e, 0x14, 0x34, 0xa0, - 0x9d, 0x1d, 0xaf, 0x37, 0x5e, 0x27, 0xe7, 0x4b, 0x71, 0x9d, 0xdf, 0x9b, 0xf7, 0x7e, 0xbf, 0xf7, - 0xdf, 0x8b, 0x0e, 0x26, 0xcf, 0x04, 0xe1, 0xbe, 0x35, 0x89, 0x06, 0x10, 0x7a, 0x20, 0x41, 0x58, - 0x17, 0xe0, 0x0d, 0xfd, 0xd0, 0xd2, 0x0f, 0x2c, 0xe0, 0x96, 0x90, 0x7e, 0xc8, 0x46, 0x60, 0x5d, - 0xec, 0xb1, 0x69, 0x30, 0x66, 0x7b, 0xd6, 0x08, 0x3c, 0x08, 0x99, 0x84, 0x21, 0x09, 0x42, 0x5f, - 0xfa, 0xf8, 0x71, 0x6a, 0x4c, 0x58, 0xc0, 0x89, 0x36, 0x26, 0x0b, 0xe3, 0xd6, 0x93, 0x11, 0x97, - 0xe3, 0x68, 0x40, 0x1c, 0xdf, 0xb5, 0x46, 0xfe, 0xc8, 0xb7, 0x94, 0xcf, 0x20, 0xfa, 0x5e, 0x49, - 0x4a, 0x50, 0xbf, 0x52, 0xac, 0x56, 0x27, 0x47, 0xec, 0xf8, 0x61, 0xc2, 0xba, 0xca, 0xd7, 0xfa, - 0x68, 0x69, 0xe3, 0x32, 0x67, 0xcc, 0x3d, 0x08, 0x67, 0x56, 0x30, 0x19, 0x29, 0xa7, 0x10, 0x84, - 0x1f, 0x85, 0x0e, 0xbc, 0x92, 0x97, 0xb0, 0x5c, 0x90, 0x6c, 0x1d, 0x97, 0x75, 0x93, 0x57, 0x18, - 0x79, 0x92, 0xbb, 0x45, 0x9a, 0x8f, 0x5f, 0xe6, 0x20, 0x9c, 0x31, 0xb8, 0x6c, 0xd5, 0xaf, 0xf3, - 0x4f, 0x19, 0xe1, 0xfd, 0xfe, 0x51, 0x3f, 0xad, 0xdf, 0x3e, 0x0b, 0x98, 0xc3, 0xe5, 0x0c, 0x7f, - 0x87, 0xaa, 0x49, 0x68, 0x43, 0x26, 0x59, 0xd3, 0xd8, 0x35, 0xba, 0xb5, 0xa7, 0x1f, 0x90, 0x65, - 0xb9, 0x33, 0x06, 0x12, 0x4c, 0x46, 0x89, 0x42, 0x90, 0xc4, 0x9a, 0x5c, 0xec, 0x91, 0xcf, 0x06, - 0xe7, 0xe0, 0xc8, 0x13, 0x90, 0xcc, 0xc6, 0x97, 0xf3, 0x76, 0x29, 0x9e, 0xb7, 0xd1, 0x52, 0x47, - 0x33, 0x54, 0xcc, 0xd1, 0x8e, 0xe7, 0x0f, 0xe1, 0xcc, 0x0f, 0xfc, 0xa9, 0x3f, 0x9a, 0x35, 0xb7, - 0x14, 0xcb, 0x87, 0x9b, 0xb1, 0x1c, 0xb3, 0x01, 0x4c, 0xfb, 0x30, 0x05, 0x47, 0xfa, 0xa1, 0x5d, - 0x8f, 0xe7, 0xed, 0x9d, 0xd3, 0x1c, 0x18, 0xbd, 0x06, 0x8d, 0x0f, 0x50, 0x5d, 0xcf, 0xc7, 0xfe, - 0x94, 0x09, 0x71, 0xca, 0x5c, 0x68, 0x96, 0x77, 0x8d, 0xee, 0x03, 0xbb, 0xa9, 0x43, 0xac, 0xf7, - 0x57, 0xde, 0x69, 0xc1, 0x03, 0x7f, 0x85, 0xaa, 0x8e, 0x2e, 0x4f, 0xb3, 0xa2, 0x82, 0x25, 0xb7, - 0x05, 0x4b, 0x16, 0x13, 0x41, 0xbe, 0x88, 0x98, 0x27, 0xb9, 0x9c, 0xd9, 0x3b, 0xf1, 0xbc, 0x5d, - 0x5d, 0x94, 0x98, 0x66, 0x68, 0x58, 0xa0, 0x87, 0x2e, 0xfb, 0x81, 0xbb, 0x91, 0xfb, 0xa5, 0x3f, - 0x8d, 0x5c, 0xe8, 0xf3, 0x1f, 0xa1, 0xb9, 0x7d, 0x27, 0x8a, 0xb7, 0xe3, 0x79, 0xfb, 0xe1, 0xc9, - 0x2a, 0x18, 0x2d, 0xe2, 0x77, 0x7e, 0x37, 0xd0, 0xa3, 0x62, 0xe3, 0x8f, 0xb9, 0x90, 0xf8, 0xdb, - 0x42, 0xf3, 0xc9, 0x86, 0x6d, 0xe1, 0x22, 0x6d, 0x7d, 0x5d, 0xd7, 0xb5, 0xba, 0xd0, 0xe4, 0x1a, - 0x7f, 0x86, 0xb6, 0xb9, 0x04, 0x57, 0x34, 0xb7, 0x76, 0xcb, 0xdd, 0xda, 0x53, 0x8b, 0xdc, 0xb2, - 0xc6, 0xa4, 0x18, 0xa1, 0xfd, 0x86, 0xc6, 0xde, 0x3e, 0x4a, 0x50, 0x68, 0x0a, 0xd6, 0xf9, 0x6d, - 0x0b, 0xd5, 0xd3, 0xec, 0x7a, 0x52, 0x32, 0x67, 0xec, 0x82, 0x27, 0x5f, 0xc3, 0x14, 0xf7, 0x51, - 0x45, 0x04, 0xe0, 0xe8, 0xe9, 0xdd, 0xbb, 0x35, 0x97, 0xd5, 0xf0, 0xfa, 0x01, 0x38, 0xf6, 0x8e, - 0x86, 0xaf, 0x24, 0x12, 0x55, 0x60, 0xf8, 0x1b, 0x74, 0x4f, 0x48, 0x26, 0x23, 0xa1, 0xa6, 0xf4, - 0xfa, 0x52, 0x6c, 0x00, 0xab, 0x5c, 0xed, 
0x37, 0x35, 0xf0, 0xbd, 0x54, 0xa6, 0x1a, 0xb2, 0x73, - 0x69, 0xa0, 0xc6, 0xaa, 0xcb, 0x6b, 0xe8, 0x3a, 0xbd, 0xde, 0xf5, 0x27, 0xaf, 0x94, 0xd2, 0x0d, - 0x3d, 0xff, 0xd3, 0x40, 0x8f, 0x0a, 0xd9, 0xab, 0x85, 0xc0, 0xc7, 0xa8, 0x11, 0x40, 0x28, 0xb8, - 0x90, 0xe0, 0xc9, 0xd4, 0x46, 0xad, 0xbd, 0x91, 0xae, 0x7d, 0x3c, 0x6f, 0x37, 0x3e, 0x5f, 0xf3, - 0x4e, 0xd7, 0x7a, 0xe1, 0x73, 0x54, 0xe7, 0xde, 0x94, 0x7b, 0xa0, 0xf7, 0x67, 0xd9, 0xf1, 0x6e, - 0x3e, 0x8f, 0xe4, 0x8f, 0x23, 0x29, 0xc8, 0x2a, 0xb2, 0x6a, 0x74, 0x23, 0x39, 0x33, 0x47, 0x2b, - 0x28, 0xb4, 0x80, 0xdb, 0xf9, 0x63, 0x4d, 0x7f, 0x92, 0x07, 0xfc, 0x3e, 0xaa, 0x32, 0xa5, 0x81, - 0x50, 0xa7, 0x91, 0xd5, 0xbb, 0xa7, 0xf5, 0x34, 0xb3, 0x50, 0x33, 0xa4, 0x4a, 0xb1, 0xe6, 0xb0, - 0x6e, 0x30, 0x43, 0xca, 0x35, 0x37, 0x43, 0x4a, 0xa6, 0x1a, 0x32, 0x09, 0x25, 0x39, 0xb0, 0xb9, - 0x43, 0x9a, 0x85, 0x72, 0xaa, 0xf5, 0x34, 0xb3, 0xe8, 0xfc, 0x57, 0x5e, 0xd3, 0x26, 0x35, 0x8c, - 0xb9, 0x9c, 0x86, 0x2a, 0xa7, 0x6a, 0x21, 0xa7, 0x61, 0x96, 0xd3, 0x10, 0xff, 0x6a, 0x20, 0xcc, - 0x32, 0x88, 0x93, 0xc5, 0xb0, 0xa6, 0x13, 0xf5, 0xe9, 0x1d, 0x96, 0x84, 0xf4, 0x0a, 0x68, 0x87, - 0x9e, 0x0c, 0x67, 0x76, 0x4b, 0x47, 0x81, 0x8b, 0x06, 0x74, 0x4d, 0x08, 0xf8, 0x1c, 0xd5, 0x52, - 0xed, 0x61, 0x18, 0xfa, 0xa1, 0x5e, 0xdb, 0xee, 0x06, 0x11, 0x29, 0x7b, 0xdb, 0x8c, 0xe7, 0xed, - 0x5a, 0x6f, 0x09, 0xf0, 0xef, 0xbc, 0x5d, 0xcb, 0xbd, 0xd3, 0x3c, 0x78, 0xc2, 0x35, 0x84, 0x25, - 0x57, 0xe5, 0x2e, 0x5c, 0x07, 0x70, 0x33, 0x57, 0x0e, 0xbc, 0x75, 0x88, 0xde, 0xb9, 0xa1, 0x44, - 0xb8, 0x8e, 0xca, 0x13, 0x98, 0xa5, 0x93, 0x48, 0x93, 0x9f, 0xb8, 0x81, 0xb6, 0x2f, 0xd8, 0x34, - 0x4a, 0x27, 0xee, 0x01, 0x4d, 0x85, 0x4f, 0xb6, 0x9e, 0x19, 0x9d, 0x9f, 0x0d, 0x94, 0xe7, 0xc0, - 0xc7, 0xa8, 0x92, 0x7c, 0x93, 0xe8, 0x33, 0xf3, 0xde, 0x66, 0x67, 0xe6, 0x8c, 0xbb, 0xb0, 0x3c, - 0x97, 0x89, 0x44, 0x15, 0x0a, 0x7e, 0x17, 0xdd, 0x77, 0x41, 0x08, 0x36, 0xd2, 0xcc, 0xf6, 0x5b, - 0xda, 0xe8, 0xfe, 0x49, 0xaa, 0xa6, 0x8b, 0x77, 0xbb, 0x77, 0x79, 0x65, 0x96, 0x9e, 0x5f, 0x99, - 0xa5, 0x17, 0x57, 0x66, 0xe9, 0xa7, 0xd8, 0x34, 0x2e, 0x63, 0xd3, 0x78, 0x1e, 0x9b, 0xc6, 0x8b, - 0xd8, 0x34, 0xfe, 0x8a, 0x4d, 0xe3, 0x97, 0xbf, 0xcd, 0xd2, 0xd7, 0x8f, 0x6f, 0xf9, 0x0a, 0xfd, - 0x3f, 0x00, 0x00, 0xff, 0xff, 0x1a, 0x8d, 0x17, 0x01, 0xbc, 0x0a, 0x00, 0x00, + // 1023 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0xcb, 0x6e, 0x23, 0x45, + 0x17, 0x4e, 0xe7, 0x32, 0xe3, 0xa9, 0xe4, 0xff, 0xc7, 0x53, 0xca, 0x0c, 0xc6, 0x23, 0xb5, 0x23, + 0xaf, 0x0c, 0x62, 0xba, 0x49, 0x40, 0x68, 0x84, 0xc4, 0xc2, 0x9d, 0x64, 0x11, 0x91, 0x84, 0xa1, + 0x1c, 0x01, 0x02, 0x16, 0x94, 0xdb, 0x07, 0xbb, 0x62, 0xf7, 0x45, 0x55, 0xd5, 0x16, 0x66, 0xc5, + 0x8a, 0x35, 0x3b, 0xde, 0x80, 0x67, 0xc9, 0x02, 0x89, 0xd1, 0xac, 0x66, 0x65, 0x91, 0x86, 0x67, + 0x60, 0xc1, 0x06, 0xd4, 0xd5, 0xe5, 0x76, 0xc7, 0x6d, 0x07, 0x27, 0x8b, 0xec, 0x5c, 0xe7, 0xf2, + 0x9d, 0xdb, 0x77, 0x4e, 0x27, 0xe8, 0xa0, 0xff, 0x5c, 0x58, 0x2c, 0xb0, 0xfb, 0x51, 0x1b, 0xb8, + 0x0f, 0x12, 0x84, 0x3d, 0x04, 0xbf, 0x13, 0x70, 0x5b, 0x2b, 0x68, 0xc8, 0x6c, 0x21, 0x03, 0x4e, + 0xbb, 0x60, 0x0f, 0x77, 0xe9, 0x20, 0xec, 0xd1, 0x5d, 0xbb, 0x0b, 0x3e, 0x70, 0x2a, 0xa1, 0x63, + 0x85, 0x3c, 0x90, 0x01, 0x7e, 0x9a, 0x1a, 0x5b, 0x34, 0x64, 0x96, 0x36, 0xb6, 0x26, 0xc6, 0xd5, + 0x67, 0x5d, 0x26, 0x7b, 0x51, 0xdb, 0x72, 0x03, 0xcf, 0xee, 0x06, 0xdd, 0xc0, 0x56, 0x3e, 0xed, + 0xe8, 0x5b, 0xf5, 0x52, 0x0f, 0xf5, 0x2b, 0xc5, 0xaa, 0xd6, 0x73, 0x81, 0xdd, 0x80, 0x27, 0x51, + 0x67, 0xe3, 0x55, 0xdf, 0x9f, 0xda, 
0x78, 0xd4, 0xed, 0x31, 0x1f, 0xf8, 0xc8, 0x0e, 0xfb, 0x5d, + 0xe5, 0xc4, 0x41, 0x04, 0x11, 0x77, 0xe1, 0x46, 0x5e, 0xc2, 0xf6, 0x40, 0xd2, 0x79, 0xb1, 0xec, + 0x45, 0x5e, 0x3c, 0xf2, 0x25, 0xf3, 0x8a, 0x61, 0x3e, 0xf8, 0x2f, 0x07, 0xe1, 0xf6, 0xc0, 0xa3, + 0xb3, 0x7e, 0xf5, 0xbf, 0xd6, 0x10, 0xde, 0x6f, 0x1d, 0xb5, 0xd2, 0xfe, 0xed, 0xd3, 0x90, 0xba, + 0x4c, 0x8e, 0xf0, 0x37, 0xa8, 0x94, 0xa4, 0xd6, 0xa1, 0x92, 0x56, 0x8c, 0x1d, 0xa3, 0xb1, 0xb9, + 0xf7, 0xae, 0x35, 0x6d, 0x77, 0x16, 0xc1, 0x0a, 0xfb, 0xdd, 0x44, 0x20, 0xac, 0xc4, 0xda, 0x1a, + 0xee, 0x5a, 0x9f, 0xb4, 0xcf, 0xc1, 0x95, 0x27, 0x20, 0xa9, 0x83, 0x2f, 0xc6, 0xb5, 0x95, 0x78, + 0x5c, 0x43, 0x53, 0x19, 0xc9, 0x50, 0x31, 0x43, 0x5b, 0x7e, 0xd0, 0x81, 0xb3, 0x20, 0x0c, 0x06, + 0x41, 0x77, 0x54, 0x59, 0x55, 0x51, 0xde, 0x5b, 0x2e, 0xca, 0x31, 0x6d, 0xc3, 0xa0, 0x05, 0x03, + 0x70, 0x65, 0xc0, 0x9d, 0x72, 0x3c, 0xae, 0x6d, 0x9d, 0xe6, 0xc0, 0xc8, 0x15, 0x68, 0x7c, 0x80, + 0xca, 0x9a, 0x1f, 0xfb, 0x03, 0x2a, 0xc4, 0x29, 0xf5, 0xa0, 0xb2, 0xb6, 0x63, 0x34, 0x1e, 0x38, + 0x15, 0x9d, 0x62, 0xb9, 0x35, 0xa3, 0x27, 0x05, 0x0f, 0xfc, 0x05, 0x2a, 0xb9, 0xba, 0x3d, 0x95, + 0x75, 0x95, 0xac, 0x75, 0x5d, 0xb2, 0xd6, 0x84, 0x11, 0xd6, 0xa7, 0x11, 0xf5, 0x25, 0x93, 0x23, + 0x67, 0x2b, 0x1e, 0xd7, 0x4a, 0x93, 0x16, 0x93, 0x0c, 0x0d, 0x0b, 0xf4, 0xc8, 0xa3, 0xdf, 0x31, + 0x2f, 0xf2, 0x3e, 0x0b, 0x06, 0x91, 0x07, 0x2d, 0xf6, 0x3d, 0x54, 0x36, 0x6e, 0x15, 0xe2, 0x71, + 0x3c, 0xae, 0x3d, 0x3a, 0x99, 0x05, 0x23, 0x45, 0xfc, 0xfa, 0xaf, 0x06, 0x7a, 0x52, 0x1c, 0xfc, + 0x31, 0x13, 0x12, 0x7f, 0x5d, 0x18, 0xbe, 0xb5, 0xe4, 0x58, 0x98, 0x48, 0x47, 0x5f, 0xd6, 0x7d, + 0x2d, 0x4d, 0x24, 0xb9, 0xc1, 0x9f, 0xa1, 0x0d, 0x26, 0xc1, 0x13, 0x95, 0xd5, 0x9d, 0xb5, 0xc6, + 0xe6, 0x9e, 0x6d, 0x5d, 0xb3, 0xc6, 0x56, 0x31, 0x43, 0xe7, 0x7f, 0x1a, 0x7b, 0xe3, 0x28, 0x41, + 0x21, 0x29, 0x58, 0xfd, 0x97, 0x55, 0x54, 0x4e, 0xab, 0x6b, 0x4a, 0x49, 0xdd, 0x9e, 0x07, 0xbe, + 0xbc, 0x03, 0x16, 0xb7, 0xd0, 0xba, 0x08, 0xc1, 0xd5, 0xec, 0xdd, 0xbd, 0xb6, 0x96, 0xd9, 0xf4, + 0x5a, 0x21, 0xb8, 0xce, 0x96, 0x86, 0x5f, 0x4f, 0x5e, 0x44, 0x81, 0xe1, 0xaf, 0xd0, 0x3d, 0x21, + 0xa9, 0x8c, 0x84, 0x62, 0xe9, 0xd5, 0xa5, 0x58, 0x02, 0x56, 0xb9, 0x3a, 0xff, 0xd7, 0xc0, 0xf7, + 0xd2, 0x37, 0xd1, 0x90, 0xf5, 0x0b, 0x03, 0x6d, 0xcf, 0xba, 0xdc, 0xc1, 0xd4, 0xc9, 0xd5, 0xa9, + 0x3f, 0xbb, 0x51, 0x49, 0x0b, 0x66, 0xfe, 0xca, 0x40, 0x4f, 0x0a, 0xd5, 0xab, 0x85, 0xc0, 0xc7, + 0x68, 0x3b, 0x04, 0x2e, 0x98, 0x90, 0xe0, 0xcb, 0xd4, 0x46, 0xad, 0xbd, 0x91, 0xae, 0x7d, 0x3c, + 0xae, 0x6d, 0xbf, 0x98, 0xa3, 0x27, 0x73, 0xbd, 0xf0, 0x39, 0x2a, 0x33, 0x7f, 0xc0, 0x7c, 0xd0, + 0xfb, 0x33, 0x9d, 0x78, 0x23, 0x5f, 0x47, 0xf2, 0xe1, 0x48, 0x1a, 0x32, 0x8b, 0xac, 0x06, 0xbd, + 0x9d, 0x9c, 0x99, 0xa3, 0x19, 0x14, 0x52, 0xc0, 0xad, 0xff, 0x36, 0x67, 0x3e, 0x89, 0x02, 0xbf, + 0x83, 0x4a, 0x54, 0x49, 0x80, 0xeb, 0x32, 0xb2, 0x7e, 0x37, 0xb5, 0x9c, 0x64, 0x16, 0x8a, 0x43, + 0xaa, 0x15, 0x73, 0x0e, 0xeb, 0x12, 0x1c, 0x52, 0xae, 0x39, 0x0e, 0xa9, 0x37, 0xd1, 0x90, 0x49, + 0x2a, 0xc9, 0x81, 0xcd, 0x1d, 0xd2, 0x2c, 0x95, 0x53, 0x2d, 0x27, 0x99, 0x45, 0xfd, 0x9f, 0xb5, + 0x39, 0x63, 0x52, 0x64, 0xcc, 0xd5, 0xd4, 0x51, 0x35, 0x95, 0x0a, 0x35, 0x75, 0xb2, 0x9a, 0x3a, + 0xf8, 0x67, 0x03, 0x61, 0x9a, 0x41, 0x9c, 0x4c, 0xc8, 0x9a, 0x32, 0xea, 0xe3, 0x5b, 0x2c, 0x89, + 0xd5, 0x2c, 0xa0, 0x1d, 0xfa, 0x92, 0x8f, 0x9c, 0xaa, 0xce, 0x02, 0x17, 0x0d, 0xc8, 0x9c, 0x14, + 0xf0, 0x39, 0xda, 0x4c, 0xa5, 0x87, 0x9c, 0x07, 0x5c, 0xaf, 0x6d, 0x63, 0x89, 0x8c, 0x94, 0xbd, + 0x63, 0xc6, 0xe3, 0xda, 0x66, 0x73, 0x0a, 0xf0, 0xf7, 0xb8, 
0xb6, 0x99, 0xd3, 0x93, 0x3c, 0x78, + 0x12, 0xab, 0x03, 0xd3, 0x58, 0xeb, 0xb7, 0x89, 0x75, 0x00, 0x8b, 0x63, 0xe5, 0xc0, 0xab, 0x87, + 0xe8, 0x8d, 0x05, 0x2d, 0xc2, 0x65, 0xb4, 0xd6, 0x87, 0x51, 0xca, 0x44, 0x92, 0xfc, 0xc4, 0xdb, + 0x68, 0x63, 0x48, 0x07, 0x51, 0xca, 0xb8, 0x07, 0x24, 0x7d, 0x7c, 0xb8, 0xfa, 0xdc, 0xa8, 0xff, + 0xb9, 0x8a, 0x1e, 0x67, 0x13, 0xe0, 0xac, 0x1d, 0x49, 0x10, 0xea, 0xc3, 0x7a, 0x07, 0x17, 0x7a, + 0x0f, 0xa1, 0x0e, 0x67, 0x43, 0xe0, 0x8a, 0xad, 0x2a, 0xb5, 0xa9, 0xc7, 0x41, 0xa6, 0x21, 0x39, + 0x2b, 0x3c, 0x44, 0x28, 0xa4, 0x9c, 0x7a, 0x20, 0x81, 0x27, 0x47, 0x38, 0xe1, 0x97, 0xb3, 0x1c, + 0xbf, 0xf2, 0xd5, 0x59, 0x2f, 0x32, 0x90, 0x94, 0x56, 0x59, 0xdc, 0xa9, 0x82, 0xe4, 0x22, 0x55, + 0x3f, 0x42, 0x0f, 0x67, 0x5c, 0x6e, 0xd4, 0xe6, 0x57, 0x06, 0x7a, 0x73, 0x6e, 0x22, 0x77, 0x70, + 0xdf, 0x3f, 0xbf, 0x7a, 0xdf, 0xf7, 0x6e, 0xde, 0xad, 0x05, 0x47, 0xfe, 0x47, 0x03, 0xe5, 0xf9, + 0x89, 0x8f, 0xd1, 0x7a, 0xf2, 0xf7, 0xac, 0x2e, 0xe1, 0xed, 0xe5, 0x4a, 0x38, 0x63, 0x1e, 0x4c, + 0x3f, 0xb5, 0xc9, 0x8b, 0x28, 0x14, 0xfc, 0x16, 0xba, 0xef, 0x81, 0x10, 0xb4, 0x3b, 0xa1, 0xc6, + 0x43, 0x6d, 0x74, 0xff, 0x24, 0x15, 0x93, 0x89, 0xde, 0x69, 0x5e, 0x5c, 0x9a, 0x2b, 0x2f, 0x2f, + 0xcd, 0x95, 0xd7, 0x97, 0xe6, 0xca, 0x0f, 0xb1, 0x69, 0x5c, 0xc4, 0xa6, 0xf1, 0x32, 0x36, 0x8d, + 0xd7, 0xb1, 0x69, 0xfc, 0x1e, 0x9b, 0xc6, 0x4f, 0x7f, 0x98, 0x2b, 0x5f, 0x3e, 0xbd, 0xe6, 0x3f, + 0x98, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xb9, 0x2f, 0x75, 0xee, 0xf8, 0x0c, 0x00, 0x00, } func (m *CSIStorageCapacity) Marshal() (dAtA []byte, err error) { @@ -734,6 +799,115 @@ func (m *VolumeAttachmentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) return len(dAtA) - i, nil } +func (m *VolumeAttributesClass) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttributesClass) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttributesClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Parameters) > 0 { + keysForParameters := make([]string, 0, len(m.Parameters)) + for k := range m.Parameters { + keysForParameters = append(keysForParameters, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) + for iNdEx := len(keysForParameters) - 1; iNdEx >= 0; iNdEx-- { + v := m.Parameters[string(keysForParameters[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForParameters[iNdEx]) + copy(dAtA[i:], keysForParameters[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForParameters[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.DriverName) + copy(dAtA[i:], m.DriverName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DriverName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *VolumeAttributesClassList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + 
+func (m *VolumeAttributesClassList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttributesClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *VolumeError) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -915,6 +1089,44 @@ func (m *VolumeAttachmentStatus) Size() (n int) { return n } +func (m *VolumeAttributesClass) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DriverName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Parameters) > 0 { + for k, v := range m.Parameters { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *VolumeAttributesClassList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *VolumeError) Size() (n int) { if m == nil { return 0 @@ -1038,6 +1250,44 @@ func (this *VolumeAttachmentStatus) String() string { }, "") return s } +func (this *VolumeAttributesClass) String() string { + if this == nil { + return "nil" + } + keysForParameters := make([]string, 0, len(this.Parameters)) + for k := range this.Parameters { + keysForParameters = append(keysForParameters, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) + mapStringForParameters := "map[string]string{" + for _, k := range keysForParameters { + mapStringForParameters += fmt.Sprintf("%v: %v,", k, this.Parameters[k]) + } + mapStringForParameters += "}" + s := strings.Join([]string{`&VolumeAttributesClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `DriverName:` + fmt.Sprintf("%v", this.DriverName) + `,`, + `Parameters:` + mapStringForParameters + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttributesClassList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]VolumeAttributesClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "VolumeAttributesClass", "VolumeAttributesClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&VolumeAttributesClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} func (this *VolumeError) String() string { if this == nil { return "nil" @@ -2198,6 +2448,365 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } 
return nil } +func (m *VolumeAttributesClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttributesClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttributesClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DriverName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DriverName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Parameters == nil { + m.Parameters = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + 
postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Parameters[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttributesClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttributesClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttributesClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if 
postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, VolumeAttributesClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *VolumeError) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.proto b/vendor/k8s.io/api/storage/v1alpha1/generated.proto index 88250a0f01..49e522be53 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.proto @@ -216,6 +216,46 @@ message VolumeAttachmentStatus { optional VolumeError detachError = 4; } +// VolumeAttributesClass represents a specification of mutable volume attributes +// defined by the CSI driver. The class can be specified during dynamic provisioning +// of PersistentVolumeClaims, and changed in the PersistentVolumeClaim spec after provisioning. +message VolumeAttributesClass { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Name of the CSI driver + // This field is immutable. + optional string driverName = 2; + + // parameters hold volume attributes defined by the CSI driver. These values + // are opaque to the Kubernetes and are passed directly to the CSI driver. + // The underlying storage provider supports changing these attributes on an + // existing volume, however the parameters field itself is immutable. To + // invoke a volume update, a new VolumeAttributesClass should be created with + // new parameters, and the PersistentVolumeClaim should be updated to reference + // the new VolumeAttributesClass. + // + // This field is required and must contain at least one key/value pair. + // The keys cannot be empty, and the maximum number of parameters is 512, with + // a cumulative max size of 256K. If the CSI driver rejects invalid parameters, + // the target PersistentVolumeClaim will be set to an "Infeasible" state in the + // modifyVolumeStatus field. + map<string, string> parameters = 3; +} + +// VolumeAttributesClassList is a collection of VolumeAttributesClass objects. +message VolumeAttributesClassList { + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of VolumeAttributesClass objects. + repeated VolumeAttributesClass items = 2; +} + // VolumeError captures an error encountered during a volume operation. message VolumeError { // time represents the time the error was encountered. 
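For context on the VolumeAttributesClass message added in the generated.proto hunk above: it corresponds to the Go type vendored further down in types.go, a cluster-scoped object that names a CSI driver and carries opaque, driver-defined parameters. Below is a minimal, illustrative sketch of constructing such an object with the vendored v1alpha1 types; the class name, driver name, and parameter keys are hypothetical placeholders, and actually creating the object would additionally require a 1.29 API server with the alpha VolumeAttributesClass feature gate enabled.

package main

import (
	"fmt"

	storagev1alpha1 "k8s.io/api/storage/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/yaml"
)

func main() {
	// Cluster-scoped class describing mutable volume attributes for one CSI driver.
	// Field names come from the v1alpha1 type definition vendored in this change;
	// the concrete values are illustrative only.
	vac := storagev1alpha1.VolumeAttributesClass{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "storage.k8s.io/v1alpha1",
			Kind:       "VolumeAttributesClass",
		},
		ObjectMeta: metav1.ObjectMeta{Name: "gold"},
		DriverName: "csi.example.com", // immutable once created
		Parameters: map[string]string{ // opaque key/value pairs passed through to the CSI driver
			"iops":       "8000",
			"throughput": "500Mi",
		},
	}

	// Render the object as YAML, e.g. to inspect what would be applied to a cluster.
	out, err := yaml.Marshal(vac)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}

Because the parameters field itself is immutable, changing attributes on an existing volume is expected to go through a new class plus an update to the PersistentVolumeClaim's volumeAttributesClassName reference, as described in the field documentation above.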
diff --git a/vendor/k8s.io/api/storage/v1alpha1/register.go b/vendor/k8s.io/api/storage/v1alpha1/register.go index 779c858028..a70f8e1863 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/register.go +++ b/vendor/k8s.io/api/storage/v1alpha1/register.go @@ -45,6 +45,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &VolumeAttachmentList{}, &CSIStorageCapacity{}, &CSIStorageCapacityList{}, + &VolumeAttributesClass{}, + &VolumeAttributesClassList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/vendor/k8s.io/api/storage/v1alpha1/types.go b/vendor/k8s.io/api/storage/v1alpha1/types.go index 59ef348a31..5957e48074 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/types.go +++ b/vendor/k8s.io/api/storage/v1alpha1/types.go @@ -251,3 +251,55 @@ type CSIStorageCapacityList struct { // +listMapKey=name Items []CSIStorageCapacity `json:"items" protobuf:"bytes,2,rep,name=items"` } + +// +genclient +// +genclient:nonNamespaced +// +k8s:prerelease-lifecycle-gen:introduced=1.29 +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeAttributesClass represents a specification of mutable volume attributes +// defined by the CSI driver. The class can be specified during dynamic provisioning +// of PersistentVolumeClaims, and changed in the PersistentVolumeClaim spec after provisioning. +type VolumeAttributesClass struct { + metav1.TypeMeta `json:",inline"` + + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Name of the CSI driver + // This field is immutable. + DriverName string `json:"driverName" protobuf:"bytes,2,opt,name=driverName"` + + // parameters hold volume attributes defined by the CSI driver. These values + // are opaque to the Kubernetes and are passed directly to the CSI driver. + // The underlying storage provider supports changing these attributes on an + // existing volume, however the parameters field itself is immutable. To + // invoke a volume update, a new VolumeAttributesClass should be created with + // new parameters, and the PersistentVolumeClaim should be updated to reference + // the new VolumeAttributesClass. + // + // This field is required and must contain at least one key/value pair. + // The keys cannot be empty, and the maximum number of parameters is 512, with + // a cumulative max size of 256K. If the CSI driver rejects invalid parameters, + // the target PersistentVolumeClaim will be set to an "Infeasible" state in the + // modifyVolumeStatus field. + Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"` +} + +// +k8s:prerelease-lifecycle-gen:introduced=1.29 +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeAttributesClassList is a collection of VolumeAttributesClass objects. +type VolumeAttributesClassList struct { + metav1.TypeMeta `json:",inline"` + + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of VolumeAttributesClass objects. 
+ Items []VolumeAttributesClass `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go index ba6afbd591..ac87dbdca3 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go @@ -103,6 +103,27 @@ func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { return map_VolumeAttachmentStatus } +var map_VolumeAttributesClass = map[string]string{ + "": "VolumeAttributesClass represents a specification of mutable volume attributes defined by the CSI driver. The class can be specified during dynamic provisioning of PersistentVolumeClaims, and changed in the PersistentVolumeClaim spec after provisioning.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "driverName": "Name of the CSI driver This field is immutable.", + "parameters": "parameters hold volume attributes defined by the CSI driver. These values are opaque to the Kubernetes and are passed directly to the CSI driver. The underlying storage provider supports changing these attributes on an existing volume, however the parameters field itself is immutable. To invoke a volume update, a new VolumeAttributesClass should be created with new parameters, and the PersistentVolumeClaim should be updated to reference the new VolumeAttributesClass.\n\nThis field is required and must contain at least one key/value pair. The keys cannot be empty, and the maximum number of parameters is 512, with a cumulative max size of 256K. If the CSI driver rejects invalid parameters, the target PersistentVolumeClaim will be set to an \"Infeasible\" state in the modifyVolumeStatus field.", +} + +func (VolumeAttributesClass) SwaggerDoc() map[string]string { + return map_VolumeAttributesClass +} + +var map_VolumeAttributesClassList = map[string]string{ + "": "VolumeAttributesClassList is a collection of VolumeAttributesClass objects.", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of VolumeAttributesClass objects.", +} + +func (VolumeAttributesClassList) SwaggerDoc() map[string]string { + return map_VolumeAttributesClassList +} + var map_VolumeError = map[string]string{ "": "VolumeError captures an error encountered during a volume operation.", "time": "time represents the time the error was encountered.", diff --git a/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go index d9bc94b250..942871f788 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go @@ -238,6 +238,72 @@ func (in *VolumeAttachmentStatus) DeepCopy() *VolumeAttachmentStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VolumeAttributesClass) DeepCopyInto(out *VolumeAttributesClass) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttributesClass. +func (in *VolumeAttributesClass) DeepCopy() *VolumeAttributesClass { + if in == nil { + return nil + } + out := new(VolumeAttributesClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeAttributesClass) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttributesClassList) DeepCopyInto(out *VolumeAttributesClassList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VolumeAttributesClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttributesClassList. +func (in *VolumeAttributesClassList) DeepCopy() *VolumeAttributesClassList { + if in == nil { + return nil + } + out := new(VolumeAttributesClassList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeAttributesClassList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VolumeError) DeepCopyInto(out *VolumeError) { *out = *in diff --git a/vendor/k8s.io/api/storage/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.prerelease-lifecycle.go index 41114c3c68..c169e782ce 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -120,3 +120,39 @@ func (in *VolumeAttachmentList) APILifecycleReplacement() schema.GroupVersionKin func (in *VolumeAttachmentList) APILifecycleRemoved() (major, minor int) { return 1, 24 } + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *VolumeAttributesClass) APILifecycleIntroduced() (major, minor int) { + return 1, 29 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. 
+func (in *VolumeAttributesClass) APILifecycleDeprecated() (major, minor int) { + return 1, 32 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *VolumeAttributesClass) APILifecycleRemoved() (major, minor int) { + return 1, 35 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *VolumeAttributesClassList) APILifecycleIntroduced() (major, minor int) { + return 1, 29 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *VolumeAttributesClassList) APILifecycleDeprecated() (major, minor int) { + return 1, 32 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *VolumeAttributesClassList) APILifecycleRemoved() (major, minor int) { + return 1, 35 +} diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.proto b/vendor/k8s.io/api/storage/v1beta1/generated.proto index 2b354dd471..b99fd39e48 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.proto +++ b/vendor/k8s.io/api/storage/v1beta1/generated.proto @@ -91,7 +91,7 @@ message CSIDriverSpec { // If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. // The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. // - // The following VolumeConext will be passed if podInfoOnMount is set to true. + // The following VolumeContext will be passed if podInfoOnMount is set to true. // This list might grow, but the prefix will be used. // "csi.storage.k8s.io/pod.name": pod.Name // "csi.storage.k8s.io/pod.namespace": pod.Namespace diff --git a/vendor/k8s.io/api/storage/v1beta1/types.go b/vendor/k8s.io/api/storage/v1beta1/types.go index 4c39b49ccd..0f5ade3c13 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types.go +++ b/vendor/k8s.io/api/storage/v1beta1/types.go @@ -311,7 +311,7 @@ type CSIDriverSpec struct { // If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. // The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. // - // The following VolumeConext will be passed if podInfoOnMount is set to true. + // The following VolumeContext will be passed if podInfoOnMount is set to true. // This list might grow, but the prefix will be used. 
// "csi.storage.k8s.io/pod.name": pod.Name // "csi.storage.k8s.io/pod.namespace": pod.Namespace diff --git a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go index 0f2718b9c1..6d9d233066 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go @@ -50,7 +50,7 @@ func (CSIDriverList) SwaggerDoc() map[string]string { var map_CSIDriverSpec = map[string]string{ "": "CSIDriverSpec is the specification of a CSIDriver.", "attachRequired": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.\n\nThis field is immutable.", - "podInfoOnMount": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", + "podInfoOnMount": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeContext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. 
\"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", "volumeLifecycleModes": "volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism.\n\nThe other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume.\n\nFor more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\n\nThis field is immutable.", "storageCapacity": "storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information, if set to true.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field was immutable in Kubernetes <= 1.22 and now is mutable.", "fsGroupPolicy": "fsGroupPolicy defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go index cc1c7437fc..8c4e147f0b 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go @@ -210,6 +210,19 @@ type ValidationRule struct { // - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values // are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with // non-intersecting keys are appended, retaining their partial order. 
+ // + // If `rule` makes use of the `oldSelf` variable it is implicitly a + // `transition rule`. + // + // By default, the `oldSelf` variable is the same type as `self`. + // When `optionalOldSelf` is true, the `oldSelf` variable is a CEL optional + // variable whose value() is the same type as `self`. + // See the documentation for the `optionalOldSelf` field for details. + // + // Transition rules by default are applied only on UPDATE requests and are + // skipped if an old value could not be found. You can opt a transition + // rule into unconditional evaluation by setting `optionalOldSelf` to true. + // Rule string // Message represents the message displayed when validation fails. The message is required if the Rule contains // line breaks. The message must not contain line breaks. @@ -246,6 +259,24 @@ type ValidationRule struct { // e.g. for attribute `foo.34$` appears in a list `testList`, the fieldPath could be set to `.testList['foo.34$']` // +optional FieldPath string + + // optionalOldSelf is used to opt a transition rule into evaluation + // even when the object is first created, or if the old object is + // missing the value. + // + // When enabled `oldSelf` will be a CEL optional whose value will be + // `None` if there is no old value, or when the object is initially created. + // + // You may check for presence of oldSelf using `oldSelf.hasValue()` and + // unwrap it after checking using `oldSelf.value()`. Check the CEL + // documentation for Optional types for more information: + // https://pkg.go.dev/github.com/google/cel-go/cel#OptionalTypes + // + // May not be set unless `oldSelf` is used in `rule`. + // + // +featureGate=CRDValidationRatcheting + // +optional + OptionalOldSelf *bool } // JSON represents any valid JSON value. diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go index 75a573a2d2..6c22a51698 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go @@ -814,202 +814,204 @@ func init() { } var fileDescriptor_f5a35c9667703937 = []byte{ - // 3111 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x5a, 0xdd, 0x6f, 0x5c, 0x47, - 0x15, 0xcf, 0x5d, 0x7b, 0xed, 0xf5, 0xd8, 0x89, 0xed, 0x49, 0x6c, 0x6e, 0xdc, 0xc4, 0xeb, 0x6c, - 0x68, 0x70, 0xdb, 0x74, 0xdd, 0x9a, 0x96, 0x86, 0x82, 0x40, 0x5e, 0xdb, 0x69, 0xdd, 0xd8, 0xb1, - 0x35, 0x9b, 0xa4, 0x6e, 0x8b, 0x68, 0xaf, 0xf7, 0x8e, 0xd7, 0xb7, 0xbe, 0x5f, 0x99, 0xb9, 0xd7, - 0x1f, 0x12, 0x48, 0x15, 0xa8, 0x02, 0x2a, 0x41, 0x79, 0xa8, 0xca, 0x13, 0x42, 0x08, 0xf5, 0x01, - 0x1e, 0xe0, 0x0d, 0xfe, 0x85, 0xbe, 0x20, 0xf5, 0x09, 0x55, 0x42, 0x5a, 0xd1, 0xe5, 0x1f, 0x40, - 0x02, 0x84, 0xf0, 0x03, 0x42, 0xf3, 0x71, 0xe7, 0xce, 0xde, 0xdd, 0x4d, 0x22, 0x7b, 0xdd, 0xbe, - 0xed, 0x9e, 0x73, 0xe6, 0xfc, 0xce, 0x9c, 0x39, 0x73, 0xe6, 0xcc, 0xb9, 0x03, 0xac, 0xdd, 0x1b, - 0xb4, 0xec, 0x04, 0x73, 0xbb, 0xf1, 0x16, 0x26, 0x3e, 0x8e, 0x30, 0x9d, 0xdb, 0xc3, 0xbe, 0x1d, - 0x90, 0x39, 0xc9, 0xb0, 0x42, 0x07, 0x1f, 0x44, 0xd8, 0xa7, 0x4e, 0xe0, 0xd3, 0xa7, 0xad, 0xd0, - 0xa1, 0x98, 0xec, 0x61, 0x32, 0x17, 0xee, 0xd6, 0x19, 0x8f, 0xb6, 0x0a, 0xcc, 0xed, 0x3d, 0x3b, - 0x57, 0xc7, 0x3e, 0x26, 0x56, 0x84, 0xed, 0x72, 0x48, 0x82, 0x28, 0x80, 0x37, 0x84, 0xa6, 0x72, - 0x8b, 0xe0, 0x9b, 0x4a, 0x53, 0x39, 0xdc, 0xad, 0x33, 0x1e, 0x6d, 0x15, 0x28, 
0xef, 0x3d, 0x3b, - 0xf5, 0x74, 0xdd, 0x89, 0x76, 0xe2, 0xad, 0x72, 0x2d, 0xf0, 0xe6, 0xea, 0x41, 0x3d, 0x98, 0xe3, - 0x0a, 0xb7, 0xe2, 0x6d, 0xfe, 0x8f, 0xff, 0xe1, 0xbf, 0x04, 0xd0, 0xd4, 0x73, 0xa9, 0xc9, 0x9e, - 0x55, 0xdb, 0x71, 0x7c, 0x4c, 0x0e, 0x53, 0x3b, 0x3d, 0x1c, 0x59, 0x1d, 0xcc, 0x9b, 0x9a, 0xeb, - 0x36, 0x8a, 0xc4, 0x7e, 0xe4, 0x78, 0xb8, 0x6d, 0xc0, 0xd7, 0x1e, 0x36, 0x80, 0xd6, 0x76, 0xb0, - 0x67, 0x65, 0xc7, 0x95, 0x8e, 0x0c, 0x30, 0xbe, 0x18, 0xf8, 0x7b, 0x98, 0xb0, 0x09, 0x22, 0x7c, - 0x3f, 0xc6, 0x34, 0x82, 0x15, 0xd0, 0x17, 0x3b, 0xb6, 0x69, 0xcc, 0x18, 0xb3, 0x43, 0x95, 0x67, - 0x3e, 0x6e, 0x14, 0xcf, 0x34, 0x1b, 0xc5, 0xbe, 0xbb, 0x2b, 0x4b, 0x47, 0x8d, 0xe2, 0x95, 0x6e, - 0x48, 0xd1, 0x61, 0x88, 0x69, 0xf9, 0xee, 0xca, 0x12, 0x62, 0x83, 0xe1, 0x4b, 0x60, 0xdc, 0xc6, - 0xd4, 0x21, 0xd8, 0x5e, 0xd8, 0x58, 0xb9, 0x27, 0xf4, 0x9b, 0x39, 0xae, 0xf1, 0xa2, 0xd4, 0x38, - 0xbe, 0x94, 0x15, 0x40, 0xed, 0x63, 0xe0, 0x26, 0x18, 0x0c, 0xb6, 0xde, 0xc6, 0xb5, 0x88, 0x9a, - 0x7d, 0x33, 0x7d, 0xb3, 0xc3, 0xf3, 0x4f, 0x97, 0xd3, 0xc5, 0x53, 0x26, 0xf0, 0x15, 0x93, 0x93, - 0x2d, 0x23, 0x6b, 0x7f, 0x39, 0x59, 0xb4, 0xca, 0xa8, 0x44, 0x1b, 0x5c, 0x17, 0x5a, 0x50, 0xa2, - 0xae, 0xf4, 0x9b, 0x1c, 0x80, 0xfa, 0xe4, 0x69, 0x18, 0xf8, 0x14, 0xf7, 0x64, 0xf6, 0x14, 0x8c, - 0xd5, 0xb8, 0xe6, 0x08, 0xdb, 0x12, 0xd7, 0xcc, 0x1d, 0xc7, 0x7a, 0x53, 0xe2, 0x8f, 0x2d, 0x66, - 0xd4, 0xa1, 0x36, 0x00, 0x78, 0x07, 0x0c, 0x10, 0x4c, 0x63, 0x37, 0x32, 0xfb, 0x66, 0x8c, 0xd9, - 0xe1, 0xf9, 0xeb, 0x5d, 0xa1, 0x78, 0x68, 0xb3, 0xe0, 0x2b, 0xef, 0x3d, 0x5b, 0xae, 0x46, 0x56, - 0x14, 0xd3, 0xca, 0x39, 0x89, 0x34, 0x80, 0xb8, 0x0e, 0x24, 0x75, 0x95, 0xfe, 0x67, 0x80, 0x31, - 0xdd, 0x4b, 0x7b, 0x0e, 0xde, 0x87, 0x04, 0x0c, 0x12, 0x11, 0x2c, 0xdc, 0x4f, 0xc3, 0xf3, 0xb7, - 0xca, 0xc7, 0xdd, 0x51, 0xe5, 0xb6, 0xf8, 0xab, 0x0c, 0xb3, 0xe5, 0x92, 0x7f, 0x50, 0x02, 0x04, - 0xf7, 0x40, 0x81, 0xc8, 0x35, 0xe2, 0x81, 0x34, 0x3c, 0xbf, 0xda, 0x1b, 0x50, 0xa1, 0xb3, 0x32, - 0xd2, 0x6c, 0x14, 0x0b, 0xc9, 0x3f, 0xa4, 0xb0, 0x4a, 0xbf, 0xca, 0x81, 0xe9, 0xc5, 0x98, 0x46, - 0x81, 0x87, 0x30, 0x0d, 0x62, 0x52, 0xc3, 0x8b, 0x81, 0x1b, 0x7b, 0xfe, 0x12, 0xde, 0x76, 0x7c, - 0x27, 0x62, 0x31, 0x3a, 0x03, 0xfa, 0x7d, 0xcb, 0xc3, 0x32, 0x66, 0x46, 0xa4, 0x27, 0xfb, 0x6f, - 0x5b, 0x1e, 0x46, 0x9c, 0xc3, 0x24, 0x58, 0x88, 0xc8, 0x1d, 0xa0, 0x24, 0xee, 0x1c, 0x86, 0x18, - 0x71, 0x0e, 0xbc, 0x06, 0x06, 0xb6, 0x03, 0xe2, 0x59, 0x62, 0xf5, 0x86, 0xd2, 0xf5, 0xb8, 0xc9, - 0xa9, 0x48, 0x72, 0xe1, 0xf3, 0x60, 0xd8, 0xc6, 0xb4, 0x46, 0x9c, 0x90, 0x41, 0x9b, 0xfd, 0x5c, - 0xf8, 0xbc, 0x14, 0x1e, 0x5e, 0x4a, 0x59, 0x48, 0x97, 0x83, 0xd7, 0x41, 0x21, 0x24, 0x4e, 0x40, - 0x9c, 0xe8, 0xd0, 0xcc, 0xcf, 0x18, 0xb3, 0xf9, 0xca, 0x98, 0x1c, 0x53, 0xd8, 0x90, 0x74, 0xa4, - 0x24, 0x98, 0xf4, 0xdb, 0x34, 0xf0, 0x37, 0xac, 0x68, 0xc7, 0x1c, 0xe0, 0x08, 0x4a, 0xfa, 0x95, - 0xea, 0xfa, 0x6d, 0x46, 0x47, 0x4a, 0xa2, 0xf4, 0x17, 0x03, 0x98, 0x59, 0x0f, 0x25, 0xee, 0x85, - 0x37, 0x41, 0x81, 0x46, 0x2c, 0xe7, 0xd4, 0x0f, 0xa5, 0x7f, 0x9e, 0x4c, 0x54, 0x55, 0x25, 0xfd, - 0xa8, 0x51, 0x9c, 0x4c, 0x47, 0x24, 0x54, 0xee, 0x1b, 0x35, 0x96, 0x85, 0xdc, 0x3e, 0xde, 0xda, - 0x09, 0x82, 0x5d, 0xb9, 0xfa, 0x27, 0x08, 0xb9, 0x57, 0x85, 0xa2, 0x14, 0x53, 0x84, 0x9c, 0x24, - 0xa3, 0x04, 0xa8, 0xf4, 0xdf, 0x5c, 0x76, 0x62, 0xda, 0xa2, 0xbf, 0x05, 0x0a, 0x6c, 0x0b, 0xd9, - 0x56, 0x64, 0xc9, 0x4d, 0xf0, 0xcc, 0xa3, 0x6d, 0x38, 0xb1, 0x5f, 0xd7, 0x70, 0x64, 0x55, 0xa0, - 0x74, 0x05, 0x48, 0x69, 0x48, 0x69, 0x85, 0x07, 0xa0, 0x9f, 0x86, 0xb8, 0x26, 0xe7, 0x7b, 0xef, - 0x04, 
0xd1, 0xde, 0x65, 0x0e, 0xd5, 0x10, 0xd7, 0xd2, 0x60, 0x64, 0xff, 0x10, 0x47, 0x84, 0xef, - 0x18, 0x60, 0x80, 0xf2, 0xbc, 0x20, 0x73, 0xc9, 0xe6, 0x29, 0x80, 0x67, 0xf2, 0x8e, 0xf8, 0x8f, - 0x24, 0x6e, 0xe9, 0x5f, 0x39, 0x70, 0xa5, 0xdb, 0xd0, 0xc5, 0xc0, 0xb7, 0xc5, 0x22, 0xac, 0xc8, - 0x7d, 0x25, 0x22, 0xeb, 0x79, 0x7d, 0x5f, 0x1d, 0x35, 0x8a, 0x8f, 0x3f, 0x54, 0x81, 0xb6, 0x01, - 0xbf, 0xae, 0xa6, 0x2c, 0x36, 0xe9, 0x95, 0x56, 0xc3, 0x8e, 0x1a, 0xc5, 0x51, 0x35, 0xac, 0xd5, - 0x56, 0xb8, 0x07, 0xa0, 0x6b, 0xd1, 0xe8, 0x0e, 0xb1, 0x7c, 0x2a, 0xd4, 0x3a, 0x1e, 0x96, 0x9e, - 0x7b, 0xf2, 0xd1, 0x82, 0x82, 0x8d, 0xa8, 0x4c, 0x49, 0x48, 0xb8, 0xda, 0xa6, 0x0d, 0x75, 0x40, - 0x60, 0x39, 0x83, 0x60, 0x8b, 0xaa, 0x34, 0xa0, 0xe5, 0x70, 0x46, 0x45, 0x92, 0x0b, 0x9f, 0x00, - 0x83, 0x1e, 0xa6, 0xd4, 0xaa, 0x63, 0xbe, 0xf7, 0x87, 0xd2, 0x43, 0x71, 0x4d, 0x90, 0x51, 0xc2, - 0x2f, 0xfd, 0xdb, 0x00, 0x97, 0xba, 0x79, 0x6d, 0xd5, 0xa1, 0x11, 0xfc, 0x4e, 0x5b, 0xd8, 0x97, - 0x1f, 0x6d, 0x86, 0x6c, 0x34, 0x0f, 0x7a, 0x95, 0x4a, 0x12, 0x8a, 0x16, 0xf2, 0xfb, 0x20, 0xef, - 0x44, 0xd8, 0x4b, 0x4e, 0x4b, 0xd4, 0xfb, 0xb0, 0xab, 0x9c, 0x95, 0xf0, 0xf9, 0x15, 0x06, 0x84, - 0x04, 0x5e, 0xe9, 0xa3, 0x1c, 0xb8, 0xdc, 0x6d, 0x08, 0xcb, 0xe3, 0x94, 0x39, 0x3b, 0x74, 0x63, - 0x62, 0xb9, 0x32, 0xd8, 0x94, 0xb3, 0x37, 0x38, 0x15, 0x49, 0x2e, 0xcb, 0x9d, 0xd4, 0xf1, 0xeb, - 0xb1, 0x6b, 0x11, 0x19, 0x49, 0x6a, 0xc2, 0x55, 0x49, 0x47, 0x4a, 0x02, 0x96, 0x01, 0xa0, 0x3b, - 0x01, 0x89, 0x38, 0x06, 0xaf, 0x70, 0x86, 0x2a, 0xe7, 0x58, 0x46, 0xa8, 0x2a, 0x2a, 0xd2, 0x24, - 0xd8, 0x41, 0xb2, 0xeb, 0xf8, 0xb6, 0x5c, 0x70, 0xb5, 0x77, 0x6f, 0x39, 0xbe, 0x8d, 0x38, 0x87, - 0xe1, 0xbb, 0x0e, 0x8d, 0x18, 0x45, 0xae, 0x76, 0x8b, 0xc3, 0xb9, 0xa4, 0x92, 0x60, 0xf8, 0x35, - 0x96, 0x60, 0x03, 0xe2, 0x60, 0x6a, 0x0e, 0xa4, 0xf8, 0x8b, 0x8a, 0x8a, 0x34, 0x89, 0xd2, 0x5f, - 0xfb, 0xbb, 0xc7, 0x07, 0x4b, 0x20, 0xf0, 0x2a, 0xc8, 0xd7, 0x49, 0x10, 0x87, 0xd2, 0x4b, 0xca, - 0xdb, 0x2f, 0x31, 0x22, 0x12, 0x3c, 0xf8, 0x3d, 0x90, 0xf7, 0xe5, 0x84, 0x59, 0x04, 0xbd, 0xda, - 0xfb, 0x65, 0xe6, 0xde, 0x4a, 0xd1, 0x85, 0x23, 0x05, 0x28, 0x7c, 0x0e, 0xe4, 0x69, 0x2d, 0x08, - 0xb1, 0x74, 0xe2, 0x74, 0x22, 0x54, 0x65, 0xc4, 0xa3, 0x46, 0xf1, 0x6c, 0xa2, 0x8e, 0x13, 0x90, - 0x10, 0x86, 0x3f, 0x32, 0x40, 0x41, 0x1e, 0x17, 0xd4, 0x1c, 0xe4, 0xe1, 0xf9, 0x5a, 0xef, 0xed, - 0x96, 0x65, 0x6f, 0xba, 0x66, 0x92, 0x40, 0x91, 0x02, 0x87, 0x3f, 0x30, 0x00, 0xa8, 0xa9, 0xb3, - 0xcb, 0x1c, 0xe2, 0x3e, 0xec, 0xd9, 0x56, 0xd1, 0x4e, 0x45, 0x11, 0x08, 0x69, 0xa9, 0xa4, 0xa1, - 0xc2, 0x2a, 0x98, 0x08, 0x09, 0xe6, 0xba, 0xef, 0xfa, 0xbb, 0x7e, 0xb0, 0xef, 0xdf, 0x74, 0xb0, - 0x6b, 0x53, 0x13, 0xcc, 0x18, 0xb3, 0x85, 0xca, 0x65, 0x69, 0xff, 0xc4, 0x46, 0x27, 0x21, 0xd4, - 0x79, 0x6c, 0xe9, 0xdd, 0xbe, 0x6c, 0xad, 0x95, 0x3d, 0x2f, 0xe0, 0xfb, 0x62, 0xf2, 0x22, 0x0f, - 0x53, 0xd3, 0xe0, 0x0b, 0xf1, 0x46, 0xef, 0x17, 0x42, 0xe5, 0xfa, 0xf4, 0x90, 0x56, 0x24, 0x8a, - 0x34, 0x13, 0xe0, 0x07, 0x06, 0x38, 0x6b, 0xd5, 0x6a, 0x38, 0x8c, 0xb0, 0x2d, 0xb6, 0x71, 0xee, - 0x74, 0xa3, 0x7a, 0x42, 0x1a, 0x74, 0x76, 0x41, 0x47, 0x45, 0xad, 0x46, 0xc0, 0x17, 0xc1, 0x39, - 0x1a, 0x05, 0x04, 0xdb, 0x49, 0x04, 0xc9, 0xec, 0x02, 0x9b, 0x8d, 0xe2, 0xb9, 0x6a, 0x0b, 0x07, - 0x65, 0x24, 0x4b, 0x9f, 0xe4, 0x41, 0xf1, 0x21, 0x11, 0xfa, 0x08, 0x45, 0xef, 0x35, 0x30, 0xc0, - 0x67, 0x6a, 0x73, 0x87, 0x14, 0xb4, 0xa3, 0x9e, 0x53, 0x91, 0xe4, 0xb2, 0xe3, 0x89, 0xe1, 0xb3, - 0xe3, 0xa9, 0x8f, 0x0b, 0xaa, 0xe3, 0xa9, 0x2a, 0xc8, 0x28, 0xe1, 0xc3, 0x79, 0x00, 0x6c, 0x1c, - 0x12, 0xcc, 0x32, 0x92, 0x6d, 
0x0e, 0x72, 0x69, 0xb5, 0x3e, 0x4b, 0x8a, 0x83, 0x34, 0x29, 0x78, - 0x13, 0xc0, 0xe4, 0x9f, 0x13, 0xf8, 0xaf, 0x5a, 0xc4, 0x77, 0xfc, 0xba, 0x59, 0xe0, 0x66, 0x4f, - 0xb2, 0xd3, 0x76, 0xa9, 0x8d, 0x8b, 0x3a, 0x8c, 0x80, 0x7b, 0x60, 0x40, 0x5c, 0xa3, 0x79, 0xde, - 0xe8, 0xe1, 0x8e, 0xbb, 0x67, 0xb9, 0x8e, 0xcd, 0xa1, 0x2a, 0x80, 0xbb, 0x87, 0xa3, 0x20, 0x89, - 0x06, 0xdf, 0x33, 0xc0, 0x08, 0x8d, 0xb7, 0x88, 0x94, 0xa6, 0x3c, 0xab, 0x0f, 0xcf, 0xdf, 0xe9, - 0x15, 0x7c, 0x55, 0xd3, 0x5d, 0x19, 0x6b, 0x36, 0x8a, 0x23, 0x3a, 0x05, 0xb5, 0x60, 0xc3, 0x3f, - 0x1a, 0xc0, 0xb4, 0x6c, 0x11, 0xfa, 0x96, 0xbb, 0x41, 0x1c, 0x3f, 0xc2, 0x44, 0x5c, 0x88, 0xc4, - 0xf1, 0xd1, 0xc3, 0x5a, 0x31, 0x7b, 0xcf, 0xaa, 0xcc, 0xc8, 0x95, 0x36, 0x17, 0xba, 0x58, 0x80, - 0xba, 0xda, 0x56, 0xfa, 0x8f, 0x91, 0x4d, 0x2d, 0xda, 0x2c, 0xab, 0x35, 0xcb, 0xc5, 0x70, 0x09, - 0x8c, 0xb1, 0xea, 0x17, 0xe1, 0xd0, 0x75, 0x6a, 0x16, 0xe5, 0xb7, 0x1f, 0x11, 0xdd, 0xea, 0x1a, - 0x5e, 0xcd, 0xf0, 0x51, 0xdb, 0x08, 0xf8, 0x0a, 0x80, 0xa2, 0x2c, 0x6c, 0xd1, 0x23, 0x2a, 0x01, - 0x55, 0xe0, 0x55, 0xdb, 0x24, 0x50, 0x87, 0x51, 0x70, 0x11, 0x8c, 0xbb, 0xd6, 0x16, 0x76, 0xab, - 0xd8, 0xc5, 0xb5, 0x28, 0x20, 0x5c, 0x95, 0xb8, 0x1f, 0x4e, 0x34, 0x1b, 0xc5, 0xf1, 0xd5, 0x2c, - 0x13, 0xb5, 0xcb, 0x97, 0xae, 0x64, 0xf7, 0xb2, 0x3e, 0x71, 0x51, 0x6c, 0x7f, 0x98, 0x03, 0x53, - 0xdd, 0x83, 0x02, 0x7e, 0x5f, 0x95, 0xc6, 0xa2, 0xe2, 0x7b, 0xed, 0x14, 0x42, 0x4f, 0x5e, 0x07, - 0x40, 0xfb, 0x55, 0x00, 0x1e, 0xb2, 0xf3, 0xda, 0x72, 0x93, 0x6b, 0xff, 0xe6, 0x69, 0xa0, 0x33, - 0xfd, 0x95, 0x21, 0x51, 0x05, 0x58, 0x2e, 0x3f, 0xf4, 0x2d, 0x17, 0x97, 0x3e, 0x6a, 0xbb, 0xda, - 0xa6, 0x9b, 0x15, 0xfe, 0xd8, 0x00, 0xa3, 0x41, 0x88, 0xfd, 0x85, 0x8d, 0x95, 0x7b, 0x5f, 0x15, - 0x9b, 0x56, 0x3a, 0x68, 0xe5, 0xf8, 0x26, 0xb2, 0xfb, 0xb5, 0xd0, 0xb5, 0x41, 0x82, 0x90, 0x56, - 0xce, 0x37, 0x1b, 0xc5, 0xd1, 0xf5, 0x56, 0x14, 0x94, 0x85, 0x2d, 0x79, 0x60, 0x62, 0xf9, 0x20, - 0xc2, 0xc4, 0xb7, 0xdc, 0xa5, 0xa0, 0x16, 0x7b, 0xd8, 0x8f, 0x84, 0x8d, 0x99, 0x76, 0x81, 0xf1, - 0x88, 0xed, 0x82, 0xcb, 0xa0, 0x2f, 0x26, 0xae, 0x8c, 0xda, 0x61, 0xd5, 0x04, 0x43, 0xab, 0x88, - 0xd1, 0x4b, 0x57, 0x40, 0x3f, 0xb3, 0x13, 0x5e, 0x04, 0x7d, 0xc4, 0xda, 0xe7, 0x5a, 0x47, 0x2a, - 0x83, 0x4c, 0x04, 0x59, 0xfb, 0x88, 0xd1, 0x4a, 0xff, 0x98, 0x01, 0xa3, 0x99, 0xb9, 0xc0, 0x29, - 0x90, 0x53, 0x9d, 0x35, 0x20, 0x95, 0xe6, 0x56, 0x96, 0x50, 0xce, 0xb1, 0xe1, 0x0b, 0x2a, 0xbb, - 0x0a, 0xd0, 0xa2, 0x3a, 0x2c, 0x38, 0x95, 0x95, 0x65, 0xa9, 0x3a, 0x66, 0x48, 0x92, 0x1e, 0x99, - 0x0d, 0x78, 0x5b, 0xee, 0x0a, 0x61, 0x03, 0xde, 0x46, 0x8c, 0x76, 0xdc, 0x5e, 0x49, 0xd2, 0xac, - 0xc9, 0x3f, 0x42, 0xb3, 0x66, 0xe0, 0x81, 0xcd, 0x9a, 0xab, 0x20, 0x1f, 0x39, 0x91, 0x8b, 0xf9, - 0x49, 0xa5, 0x15, 0xc3, 0x77, 0x18, 0x11, 0x09, 0x1e, 0xc4, 0x60, 0xd0, 0xc6, 0xdb, 0x56, 0xec, - 0x46, 0xfc, 0x50, 0x1a, 0x9e, 0xff, 0xd6, 0xc9, 0xa2, 0x47, 0x34, 0x33, 0x96, 0x84, 0x4a, 0x94, - 0xe8, 0x86, 0x8f, 0x83, 0x41, 0xcf, 0x3a, 0x70, 0xbc, 0xd8, 0xe3, 0x15, 0xa3, 0x21, 0xc4, 0xd6, - 0x04, 0x09, 0x25, 0x3c, 0x96, 0x04, 0xf1, 0x41, 0xcd, 0x8d, 0xa9, 0xb3, 0x87, 0x25, 0x53, 0x96, - 0x74, 0x2a, 0x09, 0x2e, 0x67, 0xf8, 0xa8, 0x6d, 0x04, 0x07, 0x73, 0x7c, 0x3e, 0x78, 0x58, 0x03, - 0x13, 0x24, 0x94, 0xf0, 0x5a, 0xc1, 0xa4, 0xfc, 0x48, 0x37, 0x30, 0x39, 0xb8, 0x6d, 0x04, 0x7c, - 0x0a, 0x0c, 0x79, 0xd6, 0xc1, 0x2a, 0xf6, 0xeb, 0xd1, 0x8e, 0x79, 0x76, 0xc6, 0x98, 0xed, 0xab, - 0x9c, 0x6d, 0x36, 0x8a, 0x43, 0x6b, 0x09, 0x11, 0xa5, 0x7c, 0x2e, 0xec, 0xf8, 0x52, 0xf8, 0x9c, - 0x26, 0x9c, 0x10, 0x51, 0xca, 0x67, 0x95, 0x49, 0x68, 
0x45, 0x6c, 0x5f, 0x99, 0xa3, 0xad, 0x17, - 0xe7, 0x0d, 0x41, 0x46, 0x09, 0x1f, 0xce, 0x82, 0x82, 0x67, 0x1d, 0xf0, 0x3b, 0xa5, 0x39, 0xc6, - 0xd5, 0xf2, 0x86, 0xe2, 0x9a, 0xa4, 0x21, 0xc5, 0xe5, 0x92, 0x8e, 0x2f, 0x24, 0xc7, 0x35, 0x49, - 0x49, 0x43, 0x8a, 0xcb, 0xe2, 0x37, 0xf6, 0x9d, 0xfb, 0x31, 0x16, 0xc2, 0x90, 0x7b, 0x46, 0xc5, - 0xef, 0xdd, 0x94, 0x85, 0x74, 0x39, 0x76, 0xa7, 0xf3, 0x62, 0x37, 0x72, 0x42, 0x17, 0xaf, 0x6f, - 0x9b, 0xe7, 0xb9, 0xff, 0x79, 0x29, 0xbf, 0xa6, 0xa8, 0x48, 0x93, 0x80, 0x6f, 0x81, 0x7e, 0xec, - 0xc7, 0x9e, 0x79, 0x81, 0x1f, 0xdf, 0x27, 0x8d, 0x3e, 0xb5, 0x5f, 0x96, 0xfd, 0xd8, 0x43, 0x5c, - 0x33, 0x7c, 0x01, 0x9c, 0xf5, 0xac, 0x03, 0x96, 0x04, 0x30, 0x89, 0xd8, 0x45, 0x73, 0x82, 0xcf, - 0x7b, 0x9c, 0x15, 0xb1, 0x6b, 0x3a, 0x03, 0xb5, 0xca, 0xf1, 0x81, 0x8e, 0xaf, 0x0d, 0x9c, 0xd4, - 0x06, 0xea, 0x0c, 0xd4, 0x2a, 0xc7, 0x9c, 0x4c, 0xf0, 0xfd, 0xd8, 0x21, 0xd8, 0x36, 0xbf, 0xc4, - 0xeb, 0x5e, 0xd9, 0xdf, 0x15, 0x34, 0xa4, 0xb8, 0xf0, 0x7e, 0xd2, 0x72, 0x30, 0xf9, 0xe6, 0xdb, - 0xe8, 0x59, 0xea, 0x5e, 0x27, 0x0b, 0x84, 0x58, 0x87, 0xe2, 0x54, 0xd1, 0x9b, 0x0d, 0xd0, 0x07, - 0x79, 0xcb, 0x75, 0xd7, 0xb7, 0xcd, 0x8b, 0xdc, 0xe3, 0x3d, 0x3c, 0x2d, 0x54, 0x86, 0x59, 0x60, - 0xfa, 0x91, 0x80, 0x61, 0x78, 0x81, 0xcf, 0x62, 0x61, 0xea, 0xd4, 0xf0, 0xd6, 0x99, 0x7e, 0x24, - 0x60, 0xf8, 0xfc, 0xfc, 0xc3, 0xf5, 0x6d, 0xf3, 0xb1, 0xd3, 0x9b, 0x1f, 0xd3, 0x8f, 0x04, 0x0c, - 0xb4, 0x41, 0x9f, 0x1f, 0x44, 0xe6, 0xa5, 0x5e, 0x9f, 0xbd, 0xfc, 0x34, 0xb9, 0x1d, 0x44, 0x88, - 0xa9, 0x87, 0x3f, 0x35, 0x00, 0x08, 0xd3, 0x48, 0xbc, 0x7c, 0xd2, 0x16, 0x40, 0x06, 0xad, 0x9c, - 0x46, 0xef, 0xb2, 0x1f, 0x91, 0xc3, 0xf4, 0x5e, 0xa3, 0x45, 0xb9, 0x66, 0x00, 0xfc, 0xa5, 0x01, - 0x2e, 0xe8, 0xe5, 0xae, 0xb2, 0x6c, 0x9a, 0xfb, 0x61, 0xbd, 0x87, 0x81, 0x5c, 0x09, 0x02, 0xb7, - 0x62, 0x36, 0x1b, 0xc5, 0x0b, 0x0b, 0x1d, 0x00, 0x51, 0x47, 0x33, 0xe0, 0x6f, 0x0d, 0x30, 0x2e, - 0xb3, 0xa3, 0x66, 0x5c, 0x91, 0xbb, 0xed, 0xad, 0x1e, 0xba, 0x2d, 0x0b, 0x21, 0xbc, 0xa7, 0xbe, - 0x32, 0xb6, 0xf1, 0x51, 0xbb, 0x55, 0xf0, 0x0f, 0x06, 0x18, 0xb1, 0x71, 0x88, 0x7d, 0x1b, 0xfb, - 0x35, 0x66, 0xe6, 0xcc, 0x49, 0xfb, 0x0a, 0x59, 0x33, 0x97, 0x34, 0xed, 0xc2, 0xc2, 0xb2, 0xb4, - 0x70, 0x44, 0x67, 0x1d, 0x35, 0x8a, 0x93, 0xe9, 0x50, 0x9d, 0x83, 0x5a, 0x0c, 0x84, 0x3f, 0x33, - 0xc0, 0x68, 0xea, 0x76, 0x71, 0x40, 0x5c, 0x39, 0x9d, 0x85, 0xe7, 0x25, 0xe8, 0x42, 0x2b, 0x16, - 0xca, 0x82, 0xc3, 0xdf, 0x19, 0xac, 0xda, 0x4a, 0xee, 0x6a, 0xd4, 0x2c, 0x71, 0x0f, 0xbe, 0xde, - 0x4b, 0x0f, 0x2a, 0xe5, 0xc2, 0x81, 0xd7, 0xd3, 0x4a, 0x4e, 0x71, 0x8e, 0x1a, 0xc5, 0x09, 0xdd, - 0x7f, 0x8a, 0x81, 0x74, 0xe3, 0xe0, 0xbb, 0x06, 0x18, 0xc1, 0x69, 0xc1, 0x4c, 0xcd, 0xab, 0x27, - 0x75, 0x5d, 0xc7, 0xf2, 0x5b, 0x5c, 0xa7, 0x35, 0x16, 0x45, 0x2d, 0xb0, 0xac, 0xf6, 0xc3, 0x07, - 0x96, 0x17, 0xba, 0xd8, 0xfc, 0x72, 0xef, 0x6a, 0xbf, 0x65, 0xa1, 0x12, 0x25, 0xba, 0xe1, 0x75, - 0x50, 0xf0, 0x63, 0xd7, 0xb5, 0xb6, 0x5c, 0x6c, 0x3e, 0xce, 0xab, 0x08, 0xd5, 0x5f, 0xbc, 0x2d, - 0xe9, 0x48, 0x49, 0xc0, 0x6d, 0x30, 0x73, 0x70, 0x4b, 0x3d, 0xbe, 0xe8, 0xd8, 0xc0, 0x33, 0xaf, - 0x71, 0x2d, 0x53, 0xcd, 0x46, 0x71, 0x72, 0xb3, 0x73, 0x8b, 0xef, 0xa1, 0x3a, 0xe0, 0x1b, 0xe0, - 0x31, 0x4d, 0x66, 0xd9, 0xdb, 0xc2, 0xb6, 0x8d, 0xed, 0xe4, 0xa2, 0x65, 0x7e, 0x85, 0x43, 0xa8, - 0x7d, 0xbc, 0x99, 0x15, 0x40, 0x0f, 0x1a, 0x0d, 0x57, 0xc1, 0xa4, 0xc6, 0x5e, 0xf1, 0xa3, 0x75, - 0x52, 0x8d, 0x88, 0xe3, 0xd7, 0xcd, 0x59, 0xae, 0xf7, 0x42, 0xb2, 0xfb, 0x36, 0x35, 0x1e, 0xea, - 0x32, 0x06, 0xbe, 0xdc, 0xa2, 0x8d, 0x7f, 0xb8, 0xb0, 0xc2, 0x5b, 0xf8, 0x90, 
0x9a, 0x4f, 0xf0, - 0xe2, 0x82, 0xaf, 0xf3, 0xa6, 0x46, 0x47, 0x5d, 0xe4, 0xe1, 0xb7, 0xc1, 0xf9, 0x0c, 0x87, 0xdd, - 0x2b, 0xcc, 0x27, 0xc5, 0x05, 0x81, 0x55, 0xa2, 0x9b, 0x09, 0x11, 0x75, 0x92, 0x84, 0xdf, 0x04, - 0x50, 0x23, 0xaf, 0x59, 0x21, 0x1f, 0xff, 0x94, 0xb8, 0xab, 0xb0, 0x15, 0xdd, 0x94, 0x34, 0xd4, - 0x41, 0x0e, 0x7e, 0x68, 0xb4, 0xcc, 0x24, 0xbd, 0xcd, 0x52, 0xf3, 0x3a, 0xdf, 0xb0, 0x2f, 0x1f, - 0x3f, 0x00, 0x53, 0x65, 0x28, 0x76, 0xb1, 0xe6, 0x61, 0x0d, 0x05, 0x75, 0x41, 0x9f, 0x62, 0x97, - 0xe9, 0x4c, 0x0e, 0x87, 0x63, 0xa0, 0x6f, 0x17, 0xcb, 0xcf, 0xc6, 0x88, 0xfd, 0x84, 0x6f, 0x82, - 0xfc, 0x9e, 0xe5, 0xc6, 0x49, 0x2b, 0xa0, 0x77, 0x67, 0x3d, 0x12, 0x7a, 0x5f, 0xcc, 0xdd, 0x30, - 0xa6, 0xde, 0x37, 0xc0, 0x64, 0xe7, 0x53, 0xe5, 0x8b, 0xb2, 0xe8, 0x17, 0x06, 0x18, 0x6f, 0x3b, - 0x40, 0x3a, 0x18, 0xe3, 0xb6, 0x1a, 0x73, 0xaf, 0x87, 0x27, 0x81, 0xd8, 0x08, 0xbc, 0xa2, 0xd5, - 0x2d, 0xfb, 0x89, 0x01, 0xc6, 0xb2, 0x89, 0xf9, 0x0b, 0xf2, 0x52, 0xe9, 0xbd, 0x1c, 0x98, 0xec, - 0x5c, 0x83, 0x43, 0x4f, 0x75, 0x17, 0x7a, 0xde, 0xa0, 0xe9, 0xd4, 0xb2, 0x7d, 0xc7, 0x00, 0xc3, - 0x6f, 0x2b, 0xb9, 0xe4, 0x6b, 0x66, 0x2f, 0xbb, 0x42, 0xc9, 0xd1, 0x97, 0x32, 0x28, 0xd2, 0x21, - 0x4b, 0xbf, 0x37, 0xc0, 0x44, 0xc7, 0xe3, 0x1c, 0x5e, 0x03, 0x03, 0x96, 0xeb, 0x06, 0xfb, 0xa2, - 0x9b, 0xa7, 0xb5, 0xe5, 0x17, 0x38, 0x15, 0x49, 0xae, 0xe6, 0xb3, 0xdc, 0xe7, 0xe0, 0xb3, 0xd2, - 0x9f, 0x0c, 0x70, 0xe9, 0x41, 0x51, 0xf7, 0x79, 0xaf, 0xe1, 0x2c, 0x28, 0xc8, 0x62, 0xfb, 0x90, - 0xaf, 0x9f, 0xcc, 0xae, 0x32, 0x23, 0xf0, 0xd7, 0x32, 0xe2, 0x57, 0xe9, 0xd7, 0x06, 0x18, 0xab, - 0x62, 0xb2, 0xe7, 0xd4, 0x30, 0xc2, 0xdb, 0x98, 0x60, 0xbf, 0x86, 0xe1, 0x1c, 0x18, 0xe2, 0x5f, - 0x1b, 0x43, 0xab, 0x96, 0x7c, 0x23, 0x19, 0x97, 0x8e, 0x1e, 0xba, 0x9d, 0x30, 0x50, 0x2a, 0xa3, - 0xbe, 0xa7, 0xe4, 0xba, 0x7e, 0x4f, 0xb9, 0x04, 0xfa, 0xc3, 0xb4, 0x01, 0x5c, 0x60, 0x5c, 0xde, - 0xf3, 0xe5, 0x54, 0xce, 0x0d, 0x48, 0xc4, 0xbb, 0x5c, 0x79, 0xc9, 0x0d, 0x48, 0x84, 0x38, 0xb5, - 0xf4, 0x41, 0x0e, 0x9c, 0x6b, 0xcd, 0xcf, 0x0c, 0x90, 0xc4, 0x6e, 0xdb, 0x07, 0x1c, 0xc6, 0x43, - 0x9c, 0xa3, 0xbf, 0x1b, 0xc8, 0x3d, 0xf8, 0xdd, 0x00, 0x7c, 0x09, 0x8c, 0xcb, 0x9f, 0xcb, 0x07, - 0x21, 0xc1, 0x94, 0x7f, 0x99, 0xec, 0x6b, 0x7d, 0xef, 0xb7, 0x96, 0x15, 0x40, 0xed, 0x63, 0xe0, - 0x37, 0x32, 0x6f, 0x1a, 0xae, 0xa6, 0xef, 0x19, 0x58, 0x6d, 0xc7, 0x4b, 0x87, 0x7b, 0x6c, 0xcb, - 0x2f, 0x13, 0x12, 0x90, 0xcc, 0x43, 0x87, 0x39, 0x30, 0xb4, 0xcd, 0x04, 0x78, 0x9f, 0x3c, 0xdf, - 0xea, 0xf4, 0x9b, 0x09, 0x03, 0xa5, 0x32, 0xa5, 0x3f, 0x1b, 0xe0, 0x7c, 0xf2, 0x1a, 0xc8, 0x75, - 0xb0, 0x1f, 0x2d, 0x06, 0xfe, 0xb6, 0x53, 0x87, 0x17, 0x45, 0xff, 0x53, 0x6b, 0x2a, 0x26, 0xbd, - 0x4f, 0x78, 0x1f, 0x0c, 0x52, 0xb1, 0xd8, 0x32, 0x0e, 0x5f, 0x39, 0x7e, 0x1c, 0x66, 0xa3, 0x46, - 0x94, 0x6f, 0x09, 0x35, 0xc1, 0x61, 0xa1, 0x58, 0xb3, 0x2a, 0xb1, 0x6f, 0xcb, 0x1e, 0xf8, 0x88, - 0x08, 0xc5, 0xc5, 0x05, 0x41, 0x43, 0x8a, 0x5b, 0xfa, 0xa7, 0x01, 0xc6, 0xdb, 0x5e, 0x37, 0xc1, - 0x1f, 0x1a, 0x60, 0xa4, 0xa6, 0x4d, 0x4f, 0x6e, 0xe8, 0xb5, 0x93, 0xbf, 0xa0, 0xd2, 0x94, 0x8a, - 0x1a, 0x48, 0xa7, 0xa0, 0x16, 0x50, 0xb8, 0x09, 0xcc, 0x5a, 0xe6, 0x21, 0x61, 0xe6, 0xd3, 0xe4, - 0xa5, 0x66, 0xa3, 0x68, 0x2e, 0x76, 0x91, 0x41, 0x5d, 0x47, 0x57, 0xbe, 0xfb, 0xf1, 0x67, 0xd3, - 0x67, 0x3e, 0xf9, 0x6c, 0xfa, 0xcc, 0xa7, 0x9f, 0x4d, 0x9f, 0x79, 0xa7, 0x39, 0x6d, 0x7c, 0xdc, - 0x9c, 0x36, 0x3e, 0x69, 0x4e, 0x1b, 0x9f, 0x36, 0xa7, 0x8d, 0xbf, 0x35, 0xa7, 0x8d, 0x9f, 0xff, - 0x7d, 0xfa, 0xcc, 0xeb, 0x37, 0x8e, 0xfb, 0x7c, 0xf8, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0x1d, - 0x01, 
0xc1, 0x04, 0x92, 0x2c, 0x00, 0x00, + // 3137 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x5a, 0xdf, 0x6f, 0x5c, 0x47, + 0xf5, 0xcf, 0x5d, 0x7b, 0xed, 0xf5, 0xd8, 0x89, 0xed, 0x49, 0xec, 0xef, 0x8d, 0x9b, 0x78, 0x9d, + 0xcd, 0xb7, 0xc1, 0x6d, 0xd3, 0x75, 0x1b, 0x5a, 0x1a, 0xca, 0x2f, 0x79, 0x6d, 0xa7, 0x75, 0x13, + 0xc7, 0xd6, 0x6c, 0x92, 0xba, 0x2d, 0xa2, 0xbd, 0xde, 0x3b, 0xbb, 0xbe, 0xf5, 0xfd, 0x95, 0x99, + 0x7b, 0xfd, 0x43, 0x02, 0xa9, 0x02, 0x55, 0x40, 0x25, 0x28, 0x0f, 0xa8, 0x3c, 0x21, 0x84, 0x50, + 0x1f, 0xe0, 0x01, 0xde, 0xe0, 0x5f, 0xe8, 0x0b, 0x52, 0x25, 0x24, 0x54, 0x09, 0x69, 0x45, 0x97, + 0x7f, 0x00, 0x09, 0x10, 0xc2, 0x0f, 0x08, 0xcd, 0x8f, 0x3b, 0x77, 0xf6, 0xee, 0x6e, 0x12, 0xd9, + 0xeb, 0xf6, 0x6d, 0xf7, 0x9c, 0x33, 0xe7, 0x73, 0xe6, 0xcc, 0x99, 0x33, 0x67, 0xce, 0x1d, 0x60, + 0xed, 0x5c, 0xa7, 0x65, 0x27, 0x58, 0xd8, 0x89, 0xb7, 0x30, 0xf1, 0x71, 0x84, 0xe9, 0xc2, 0x2e, + 0xf6, 0xed, 0x80, 0x2c, 0x48, 0x86, 0x15, 0x3a, 0x78, 0x3f, 0xc2, 0x3e, 0x75, 0x02, 0x9f, 0x3e, + 0x6d, 0x85, 0x0e, 0xc5, 0x64, 0x17, 0x93, 0x85, 0x70, 0xa7, 0xc1, 0x78, 0xb4, 0x5d, 0x60, 0x61, + 0xf7, 0xd9, 0x85, 0x06, 0xf6, 0x31, 0xb1, 0x22, 0x6c, 0x97, 0x43, 0x12, 0x44, 0x01, 0xbc, 0x2e, + 0x34, 0x95, 0xdb, 0x04, 0xdf, 0x54, 0x9a, 0xca, 0xe1, 0x4e, 0x83, 0xf1, 0x68, 0xbb, 0x40, 0x79, + 0xf7, 0xd9, 0x99, 0xa7, 0x1b, 0x4e, 0xb4, 0x1d, 0x6f, 0x95, 0x6b, 0x81, 0xb7, 0xd0, 0x08, 0x1a, + 0xc1, 0x02, 0x57, 0xb8, 0x15, 0xd7, 0xf9, 0x3f, 0xfe, 0x87, 0xff, 0x12, 0x40, 0x33, 0xcf, 0xa5, + 0x26, 0x7b, 0x56, 0x6d, 0xdb, 0xf1, 0x31, 0x39, 0x48, 0xed, 0xf4, 0x70, 0x64, 0x75, 0x31, 0x6f, + 0x66, 0xa1, 0xd7, 0x28, 0x12, 0xfb, 0x91, 0xe3, 0xe1, 0x8e, 0x01, 0x5f, 0x7a, 0xd8, 0x00, 0x5a, + 0xdb, 0xc6, 0x9e, 0x95, 0x1d, 0x57, 0x3a, 0x34, 0xc0, 0xe4, 0x52, 0xe0, 0xef, 0x62, 0xc2, 0x26, + 0x88, 0xf0, 0xfd, 0x18, 0xd3, 0x08, 0x56, 0xc0, 0x40, 0xec, 0xd8, 0xa6, 0x31, 0x67, 0xcc, 0x8f, + 0x54, 0x9e, 0xf9, 0xa8, 0x59, 0x3c, 0xd5, 0x6a, 0x16, 0x07, 0xee, 0xae, 0x2e, 0x1f, 0x36, 0x8b, + 0x97, 0x7a, 0x21, 0x45, 0x07, 0x21, 0xa6, 0xe5, 0xbb, 0xab, 0xcb, 0x88, 0x0d, 0x86, 0x2f, 0x81, + 0x49, 0x1b, 0x53, 0x87, 0x60, 0x7b, 0x71, 0x63, 0xf5, 0x9e, 0xd0, 0x6f, 0xe6, 0xb8, 0xc6, 0xf3, + 0x52, 0xe3, 0xe4, 0x72, 0x56, 0x00, 0x75, 0x8e, 0x81, 0x9b, 0x60, 0x38, 0xd8, 0x7a, 0x1b, 0xd7, + 0x22, 0x6a, 0x0e, 0xcc, 0x0d, 0xcc, 0x8f, 0x5e, 0x7b, 0xba, 0x9c, 0x2e, 0x9e, 0x32, 0x81, 0xaf, + 0x98, 0x9c, 0x6c, 0x19, 0x59, 0x7b, 0x2b, 0xc9, 0xa2, 0x55, 0xc6, 0x25, 0xda, 0xf0, 0xba, 0xd0, + 0x82, 0x12, 0x75, 0xa5, 0x5f, 0xe5, 0x00, 0xd4, 0x27, 0x4f, 0xc3, 0xc0, 0xa7, 0xb8, 0x2f, 0xb3, + 0xa7, 0x60, 0xa2, 0xc6, 0x35, 0x47, 0xd8, 0x96, 0xb8, 0x66, 0xee, 0x28, 0xd6, 0x9b, 0x12, 0x7f, + 0x62, 0x29, 0xa3, 0x0e, 0x75, 0x00, 0xc0, 0x3b, 0x60, 0x88, 0x60, 0x1a, 0xbb, 0x91, 0x39, 0x30, + 0x67, 0xcc, 0x8f, 0x5e, 0xbb, 0xda, 0x13, 0x8a, 0x87, 0x36, 0x0b, 0xbe, 0xf2, 0xee, 0xb3, 0xe5, + 0x6a, 0x64, 0x45, 0x31, 0xad, 0x9c, 0x91, 0x48, 0x43, 0x88, 0xeb, 0x40, 0x52, 0x57, 0xe9, 0xbf, + 0x06, 0x98, 0xd0, 0xbd, 0xb4, 0xeb, 0xe0, 0x3d, 0x48, 0xc0, 0x30, 0x11, 0xc1, 0xc2, 0xfd, 0x34, + 0x7a, 0xed, 0x66, 0xf9, 0xa8, 0x3b, 0xaa, 0xdc, 0x11, 0x7f, 0x95, 0x51, 0xb6, 0x5c, 0xf2, 0x0f, + 0x4a, 0x80, 0xe0, 0x2e, 0x28, 0x10, 0xb9, 0x46, 0x3c, 0x90, 0x46, 0xaf, 0xdd, 0xea, 0x0f, 0xa8, + 0xd0, 0x59, 0x19, 0x6b, 0x35, 0x8b, 0x85, 0xe4, 0x1f, 0x52, 0x58, 0xa5, 0x5f, 0xe4, 0xc0, 0xec, + 0x52, 0x4c, 0xa3, 0xc0, 0x43, 0x98, 0x06, 0x31, 0xa9, 0xe1, 0xa5, 0xc0, 0x8d, 0x3d, 0x7f, 0x19, + 0xd7, 0x1d, 0xdf, 0x89, 0x58, 0x8c, 
0xce, 0x81, 0x41, 0xdf, 0xf2, 0xb0, 0x8c, 0x99, 0x31, 0xe9, + 0xc9, 0xc1, 0xdb, 0x96, 0x87, 0x11, 0xe7, 0x30, 0x09, 0x16, 0x22, 0x72, 0x07, 0x28, 0x89, 0x3b, + 0x07, 0x21, 0x46, 0x9c, 0x03, 0xaf, 0x80, 0xa1, 0x7a, 0x40, 0x3c, 0x4b, 0xac, 0xde, 0x48, 0xba, + 0x1e, 0x37, 0x38, 0x15, 0x49, 0x2e, 0x7c, 0x1e, 0x8c, 0xda, 0x98, 0xd6, 0x88, 0x13, 0x32, 0x68, + 0x73, 0x90, 0x0b, 0x9f, 0x95, 0xc2, 0xa3, 0xcb, 0x29, 0x0b, 0xe9, 0x72, 0xf0, 0x2a, 0x28, 0x84, + 0xc4, 0x09, 0x88, 0x13, 0x1d, 0x98, 0xf9, 0x39, 0x63, 0x3e, 0x5f, 0x99, 0x90, 0x63, 0x0a, 0x1b, + 0x92, 0x8e, 0x94, 0x04, 0x93, 0x7e, 0x9b, 0x06, 0xfe, 0x86, 0x15, 0x6d, 0x9b, 0x43, 0x1c, 0x41, + 0x49, 0xbf, 0x52, 0x5d, 0xbf, 0xcd, 0xe8, 0x48, 0x49, 0x94, 0xfe, 0x6c, 0x00, 0x33, 0xeb, 0xa1, + 0xc4, 0xbd, 0xf0, 0x06, 0x28, 0xd0, 0x88, 0xe5, 0x9c, 0xc6, 0x81, 0xf4, 0xcf, 0x93, 0x89, 0xaa, + 0xaa, 0xa4, 0x1f, 0x36, 0x8b, 0xd3, 0xe9, 0x88, 0x84, 0xca, 0x7d, 0xa3, 0xc6, 0xb2, 0x90, 0xdb, + 0xc3, 0x5b, 0xdb, 0x41, 0xb0, 0x23, 0x57, 0xff, 0x18, 0x21, 0xf7, 0xaa, 0x50, 0x94, 0x62, 0x8a, + 0x90, 0x93, 0x64, 0x94, 0x00, 0x95, 0xfe, 0x93, 0xcb, 0x4e, 0x4c, 0x5b, 0xf4, 0xb7, 0x40, 0x81, + 0x6d, 0x21, 0xdb, 0x8a, 0x2c, 0xb9, 0x09, 0x9e, 0x79, 0xb4, 0x0d, 0x27, 0xf6, 0xeb, 0x1a, 0x8e, + 0xac, 0x0a, 0x94, 0xae, 0x00, 0x29, 0x0d, 0x29, 0xad, 0x70, 0x1f, 0x0c, 0xd2, 0x10, 0xd7, 0xe4, + 0x7c, 0xef, 0x1d, 0x23, 0xda, 0x7b, 0xcc, 0xa1, 0x1a, 0xe2, 0x5a, 0x1a, 0x8c, 0xec, 0x1f, 0xe2, + 0x88, 0xf0, 0x1d, 0x03, 0x0c, 0x51, 0x9e, 0x17, 0x64, 0x2e, 0xd9, 0x3c, 0x01, 0xf0, 0x4c, 0xde, + 0x11, 0xff, 0x91, 0xc4, 0x2d, 0xfd, 0x33, 0x07, 0x2e, 0xf5, 0x1a, 0xba, 0x14, 0xf8, 0xb6, 0x58, + 0x84, 0x55, 0xb9, 0xaf, 0x44, 0x64, 0x3d, 0xaf, 0xef, 0xab, 0xc3, 0x66, 0xf1, 0xf1, 0x87, 0x2a, + 0xd0, 0x36, 0xe0, 0x97, 0xd5, 0x94, 0xc5, 0x26, 0xbd, 0xd4, 0x6e, 0xd8, 0x61, 0xb3, 0x38, 0xae, + 0x86, 0xb5, 0xdb, 0x0a, 0x77, 0x01, 0x74, 0x2d, 0x1a, 0xdd, 0x21, 0x96, 0x4f, 0x85, 0x5a, 0xc7, + 0xc3, 0xd2, 0x73, 0x4f, 0x3e, 0x5a, 0x50, 0xb0, 0x11, 0x95, 0x19, 0x09, 0x09, 0x6f, 0x75, 0x68, + 0x43, 0x5d, 0x10, 0x58, 0xce, 0x20, 0xd8, 0xa2, 0x2a, 0x0d, 0x68, 0x39, 0x9c, 0x51, 0x91, 0xe4, + 0xc2, 0x27, 0xc0, 0xb0, 0x87, 0x29, 0xb5, 0x1a, 0x98, 0xef, 0xfd, 0x91, 0xf4, 0x50, 0x5c, 0x13, + 0x64, 0x94, 0xf0, 0x4b, 0xff, 0x32, 0xc0, 0x85, 0x5e, 0x5e, 0xbb, 0xe5, 0xd0, 0x08, 0x7e, 0xb3, + 0x23, 0xec, 0xcb, 0x8f, 0x36, 0x43, 0x36, 0x9a, 0x07, 0xbd, 0x4a, 0x25, 0x09, 0x45, 0x0b, 0xf9, + 0x3d, 0x90, 0x77, 0x22, 0xec, 0x25, 0xa7, 0x25, 0xea, 0x7f, 0xd8, 0x55, 0x4e, 0x4b, 0xf8, 0xfc, + 0x2a, 0x03, 0x42, 0x02, 0xaf, 0xf4, 0x61, 0x0e, 0x5c, 0xec, 0x35, 0x84, 0xe5, 0x71, 0xca, 0x9c, + 0x1d, 0xba, 0x31, 0xb1, 0x5c, 0x19, 0x6c, 0xca, 0xd9, 0x1b, 0x9c, 0x8a, 0x24, 0x97, 0xe5, 0x4e, + 0xea, 0xf8, 0x8d, 0xd8, 0xb5, 0x88, 0x8c, 0x24, 0x35, 0xe1, 0xaa, 0xa4, 0x23, 0x25, 0x01, 0xcb, + 0x00, 0xd0, 0xed, 0x80, 0x44, 0x1c, 0x83, 0x57, 0x38, 0x23, 0x95, 0x33, 0x2c, 0x23, 0x54, 0x15, + 0x15, 0x69, 0x12, 0xec, 0x20, 0xd9, 0x71, 0x7c, 0x5b, 0x2e, 0xb8, 0xda, 0xbb, 0x37, 0x1d, 0xdf, + 0x46, 0x9c, 0xc3, 0xf0, 0x5d, 0x87, 0x46, 0x8c, 0x22, 0x57, 0xbb, 0xcd, 0xe1, 0x5c, 0x52, 0x49, + 0x30, 0xfc, 0x1a, 0x4b, 0xb0, 0x01, 0x71, 0x30, 0x35, 0x87, 0x52, 0xfc, 0x25, 0x45, 0x45, 0x9a, + 0x44, 0xe9, 0x2f, 0x83, 0xbd, 0xe3, 0x83, 0x25, 0x10, 0x78, 0x19, 0xe4, 0x1b, 0x24, 0x88, 0x43, + 0xe9, 0x25, 0xe5, 0xed, 0x97, 0x18, 0x11, 0x09, 0x1e, 0xfc, 0x36, 0xc8, 0xfb, 0x72, 0xc2, 0x2c, + 0x82, 0x5e, 0xed, 0xff, 0x32, 0x73, 0x6f, 0xa5, 0xe8, 0xc2, 0x91, 0x02, 0x14, 0x3e, 0x07, 0xf2, + 0xb4, 0x16, 0x84, 0x58, 0x3a, 0x71, 0x36, 0x11, 0xaa, 0x32, 
0xe2, 0x61, 0xb3, 0x78, 0x3a, 0x51, + 0xc7, 0x09, 0x48, 0x08, 0xc3, 0xef, 0x1b, 0xa0, 0x20, 0x8f, 0x0b, 0x6a, 0x0e, 0xf3, 0xf0, 0x7c, + 0xad, 0xff, 0x76, 0xcb, 0xb2, 0x37, 0x5d, 0x33, 0x49, 0xa0, 0x48, 0x81, 0xc3, 0xef, 0x1a, 0x00, + 0xd4, 0xd4, 0xd9, 0x65, 0x8e, 0x70, 0x1f, 0xf6, 0x6d, 0xab, 0x68, 0xa7, 0xa2, 0x08, 0x84, 0xb4, + 0x54, 0xd2, 0x50, 0x61, 0x15, 0x4c, 0x85, 0x04, 0x73, 0xdd, 0x77, 0xfd, 0x1d, 0x3f, 0xd8, 0xf3, + 0x6f, 0x38, 0xd8, 0xb5, 0xa9, 0x09, 0xe6, 0x8c, 0xf9, 0x42, 0xe5, 0xa2, 0xb4, 0x7f, 0x6a, 0xa3, + 0x9b, 0x10, 0xea, 0x3e, 0xb6, 0xf4, 0xee, 0x40, 0xb6, 0xd6, 0xca, 0x9e, 0x17, 0xf0, 0x7d, 0x31, + 0x79, 0x91, 0x87, 0xa9, 0x69, 0xf0, 0x85, 0x78, 0xa3, 0xff, 0x0b, 0xa1, 0x72, 0x7d, 0x7a, 0x48, + 0x2b, 0x12, 0x45, 0x9a, 0x09, 0xf0, 0xa7, 0x06, 0x38, 0x6d, 0xd5, 0x6a, 0x38, 0x8c, 0xb0, 0x2d, + 0xb6, 0x71, 0xee, 0x64, 0xa3, 0x7a, 0x4a, 0x1a, 0x74, 0x7a, 0x51, 0x47, 0x45, 0xed, 0x46, 0xc0, + 0x17, 0xc1, 0x19, 0x1a, 0x05, 0x04, 0xdb, 0x49, 0x04, 0xc9, 0xec, 0x02, 0x5b, 0xcd, 0xe2, 0x99, + 0x6a, 0x1b, 0x07, 0x65, 0x24, 0x4b, 0x1f, 0xe7, 0x41, 0xf1, 0x21, 0x11, 0xfa, 0x08, 0x45, 0xef, + 0x15, 0x30, 0xc4, 0x67, 0x6a, 0x73, 0x87, 0x14, 0xb4, 0xa3, 0x9e, 0x53, 0x91, 0xe4, 0xb2, 0xe3, + 0x89, 0xe1, 0xb3, 0xe3, 0x69, 0x80, 0x0b, 0xaa, 0xe3, 0xa9, 0x2a, 0xc8, 0x28, 0xe1, 0xc3, 0x6b, + 0x00, 0xd8, 0x38, 0x24, 0x98, 0x65, 0x24, 0xdb, 0x1c, 0xe6, 0xd2, 0x6a, 0x7d, 0x96, 0x15, 0x07, + 0x69, 0x52, 0xf0, 0x06, 0x80, 0xc9, 0x3f, 0x27, 0xf0, 0x5f, 0xb5, 0x88, 0xef, 0xf8, 0x0d, 0xb3, + 0xc0, 0xcd, 0x9e, 0x66, 0xa7, 0xed, 0x72, 0x07, 0x17, 0x75, 0x19, 0x01, 0x77, 0xc1, 0x90, 0xb8, + 0x46, 0xf3, 0xbc, 0xd1, 0xc7, 0x1d, 0x77, 0xcf, 0x72, 0x1d, 0x9b, 0x43, 0x55, 0x00, 0x77, 0x0f, + 0x47, 0x41, 0x12, 0x0d, 0xbe, 0x67, 0x80, 0x31, 0x1a, 0x6f, 0x11, 0x29, 0x4d, 0x79, 0x56, 0x1f, + 0xbd, 0x76, 0xa7, 0x5f, 0xf0, 0x55, 0x4d, 0x77, 0x65, 0xa2, 0xd5, 0x2c, 0x8e, 0xe9, 0x14, 0xd4, + 0x86, 0x0d, 0x7f, 0x6f, 0x00, 0xd3, 0xb2, 0x45, 0xe8, 0x5b, 0xee, 0x06, 0x71, 0xfc, 0x08, 0x13, + 0x71, 0x21, 0x12, 0xc7, 0x47, 0x1f, 0x6b, 0xc5, 0xec, 0x3d, 0xab, 0x32, 0x27, 0x57, 0xda, 0x5c, + 0xec, 0x61, 0x01, 0xea, 0x69, 0x5b, 0xe9, 0xdf, 0x46, 0x36, 0xb5, 0x68, 0xb3, 0xac, 0xd6, 0x2c, + 0x17, 0xc3, 0x65, 0x30, 0xc1, 0xaa, 0x5f, 0x84, 0x43, 0xd7, 0xa9, 0x59, 0x94, 0xdf, 0x7e, 0x44, + 0x74, 0xab, 0x6b, 0x78, 0x35, 0xc3, 0x47, 0x1d, 0x23, 0xe0, 0x2b, 0x00, 0x8a, 0xb2, 0xb0, 0x4d, + 0x8f, 0xa8, 0x04, 0x54, 0x81, 0x57, 0xed, 0x90, 0x40, 0x5d, 0x46, 0xc1, 0x25, 0x30, 0xe9, 0x5a, + 0x5b, 0xd8, 0xad, 0x62, 0x17, 0xd7, 0xa2, 0x80, 0x70, 0x55, 0xe2, 0x7e, 0x38, 0xd5, 0x6a, 0x16, + 0x27, 0x6f, 0x65, 0x99, 0xa8, 0x53, 0xbe, 0x74, 0x29, 0xbb, 0x97, 0xf5, 0x89, 0x8b, 0x62, 0xfb, + 0x83, 0x1c, 0x98, 0xe9, 0x1d, 0x14, 0xf0, 0x3b, 0xaa, 0x34, 0x16, 0x15, 0xdf, 0x6b, 0x27, 0x10, + 0x7a, 0xf2, 0x3a, 0x00, 0x3a, 0xaf, 0x02, 0xf0, 0x80, 0x9d, 0xd7, 0x96, 0x9b, 0x5c, 0xfb, 0x37, + 0x4f, 0x02, 0x9d, 0xe9, 0xaf, 0x8c, 0x88, 0x2a, 0xc0, 0x72, 0xf9, 0xa1, 0x6f, 0xb9, 0xb8, 0xf4, + 0x61, 0xc7, 0xd5, 0x36, 0xdd, 0xac, 0xf0, 0x07, 0x06, 0x18, 0x0f, 0x42, 0xec, 0x2f, 0x6e, 0xac, + 0xde, 0xfb, 0xa2, 0xd8, 0xb4, 0xd2, 0x41, 0xab, 0x47, 0x37, 0x91, 0xdd, 0xaf, 0x85, 0xae, 0x0d, + 0x12, 0x84, 0xb4, 0x72, 0xb6, 0xd5, 0x2c, 0x8e, 0xaf, 0xb7, 0xa3, 0xa0, 0x2c, 0x6c, 0xc9, 0x03, + 0x53, 0x2b, 0xfb, 0x11, 0x26, 0xbe, 0xe5, 0x2e, 0x07, 0xb5, 0xd8, 0xc3, 0x7e, 0x24, 0x6c, 0xcc, + 0xb4, 0x0b, 0x8c, 0x47, 0x6c, 0x17, 0x5c, 0x04, 0x03, 0x31, 0x71, 0x65, 0xd4, 0x8e, 0xaa, 0x26, + 0x18, 0xba, 0x85, 0x18, 0xbd, 0x74, 0x09, 0x0c, 0x32, 0x3b, 0xe1, 0x79, 0x30, 0x40, 
0xac, 0x3d, + 0xae, 0x75, 0xac, 0x32, 0xcc, 0x44, 0x90, 0xb5, 0x87, 0x18, 0xad, 0xf4, 0xf7, 0x39, 0x30, 0x9e, + 0x99, 0x0b, 0x9c, 0x01, 0x39, 0xd5, 0x59, 0x03, 0x52, 0x69, 0x6e, 0x75, 0x19, 0xe5, 0x1c, 0x1b, + 0xbe, 0xa0, 0xb2, 0xab, 0x00, 0x2d, 0xaa, 0xc3, 0x82, 0x53, 0x59, 0x59, 0x96, 0xaa, 0x63, 0x86, + 0x24, 0xe9, 0x91, 0xd9, 0x80, 0xeb, 0x72, 0x57, 0x08, 0x1b, 0x70, 0x1d, 0x31, 0xda, 0x51, 0x7b, + 0x25, 0x49, 0xb3, 0x26, 0xff, 0x08, 0xcd, 0x9a, 0xa1, 0x07, 0x36, 0x6b, 0x2e, 0x83, 0x7c, 0xe4, + 0x44, 0x2e, 0xe6, 0x27, 0x95, 0x56, 0x0c, 0xdf, 0x61, 0x44, 0x24, 0x78, 0x10, 0x83, 0x61, 0x1b, + 0xd7, 0xad, 0xd8, 0x8d, 0xf8, 0xa1, 0x34, 0x7a, 0xed, 0xeb, 0xc7, 0x8b, 0x1e, 0xd1, 0xcc, 0x58, + 0x16, 0x2a, 0x51, 0xa2, 0x1b, 0x3e, 0x0e, 0x86, 0x3d, 0x6b, 0xdf, 0xf1, 0x62, 0x8f, 0x57, 0x8c, + 0x86, 0x10, 0x5b, 0x13, 0x24, 0x94, 0xf0, 0x58, 0x12, 0xc4, 0xfb, 0x35, 0x37, 0xa6, 0xce, 0x2e, + 0x96, 0x4c, 0x59, 0xd2, 0xa9, 0x24, 0xb8, 0x92, 0xe1, 0xa3, 0x8e, 0x11, 0x1c, 0xcc, 0xf1, 0xf9, + 0xe0, 0x51, 0x0d, 0x4c, 0x90, 0x50, 0xc2, 0x6b, 0x07, 0x93, 0xf2, 0x63, 0xbd, 0xc0, 0xe4, 0xe0, + 0x8e, 0x11, 0xf0, 0x29, 0x30, 0xe2, 0x59, 0xfb, 0xb7, 0xb0, 0xdf, 0x88, 0xb6, 0xcd, 0xd3, 0x73, + 0xc6, 0xfc, 0x40, 0xe5, 0x74, 0xab, 0x59, 0x1c, 0x59, 0x4b, 0x88, 0x28, 0xe5, 0x73, 0x61, 0xc7, + 0x97, 0xc2, 0x67, 0x34, 0xe1, 0x84, 0x88, 0x52, 0x3e, 0xab, 0x4c, 0x42, 0x2b, 0x62, 0xfb, 0xca, + 0x1c, 0x6f, 0xbf, 0x38, 0x6f, 0x08, 0x32, 0x4a, 0xf8, 0x70, 0x1e, 0x14, 0x3c, 0x6b, 0x9f, 0xdf, + 0x29, 0xcd, 0x09, 0xae, 0x96, 0x37, 0x14, 0xd7, 0x24, 0x0d, 0x29, 0x2e, 0x97, 0x74, 0x7c, 0x21, + 0x39, 0xa9, 0x49, 0x4a, 0x1a, 0x52, 0x5c, 0x16, 0xbf, 0xb1, 0xef, 0xdc, 0x8f, 0xb1, 0x10, 0x86, + 0xdc, 0x33, 0x2a, 0x7e, 0xef, 0xa6, 0x2c, 0xa4, 0xcb, 0xb1, 0x3b, 0x9d, 0x17, 0xbb, 0x91, 0x13, + 0xba, 0x78, 0xbd, 0x6e, 0x9e, 0xe5, 0xfe, 0xe7, 0xa5, 0xfc, 0x9a, 0xa2, 0x22, 0x4d, 0x02, 0xbe, + 0x05, 0x06, 0xb1, 0x1f, 0x7b, 0xe6, 0x39, 0x7e, 0x7c, 0x1f, 0x37, 0xfa, 0xd4, 0x7e, 0x59, 0xf1, + 0x63, 0x0f, 0x71, 0xcd, 0xf0, 0x05, 0x70, 0xda, 0xb3, 0xf6, 0x59, 0x12, 0xc0, 0x24, 0x62, 0x17, + 0xcd, 0x29, 0x3e, 0xef, 0x49, 0x56, 0xc4, 0xae, 0xe9, 0x0c, 0xd4, 0x2e, 0xc7, 0x07, 0x3a, 0xbe, + 0x36, 0x70, 0x5a, 0x1b, 0xa8, 0x33, 0x50, 0xbb, 0x1c, 0x73, 0x32, 0xc1, 0xf7, 0x63, 0x87, 0x60, + 0xdb, 0xfc, 0x3f, 0x5e, 0xf7, 0xca, 0xfe, 0xae, 0xa0, 0x21, 0xc5, 0x85, 0xf7, 0x93, 0x96, 0x83, + 0xc9, 0x37, 0xdf, 0x46, 0xdf, 0x52, 0xf7, 0x3a, 0x59, 0x24, 0xc4, 0x3a, 0x10, 0xa7, 0x8a, 0xde, + 0x6c, 0x80, 0x3e, 0xc8, 0x5b, 0xae, 0xbb, 0x5e, 0x37, 0xcf, 0x73, 0x8f, 0xf7, 0xf1, 0xb4, 0x50, + 0x19, 0x66, 0x91, 0xe9, 0x47, 0x02, 0x86, 0xe1, 0x05, 0x3e, 0x8b, 0x85, 0x99, 0x13, 0xc3, 0x5b, + 0x67, 0xfa, 0x91, 0x80, 0xe1, 0xf3, 0xf3, 0x0f, 0xd6, 0xeb, 0xe6, 0x63, 0x27, 0x37, 0x3f, 0xa6, + 0x1f, 0x09, 0x18, 0x68, 0x83, 0x01, 0x3f, 0x88, 0xcc, 0x0b, 0xfd, 0x3e, 0x7b, 0xf9, 0x69, 0x72, + 0x3b, 0x88, 0x10, 0x53, 0x0f, 0x7f, 0x64, 0x00, 0x10, 0xa6, 0x91, 0x78, 0xf1, 0xb8, 0x2d, 0x80, + 0x0c, 0x5a, 0x39, 0x8d, 0xde, 0x15, 0x3f, 0x22, 0x07, 0xe9, 0xbd, 0x46, 0x8b, 0x72, 0xcd, 0x00, + 0xf8, 0x73, 0x03, 0x9c, 0xd3, 0xcb, 0x5d, 0x65, 0xd9, 0x2c, 0xf7, 0xc3, 0x7a, 0x1f, 0x03, 0xb9, + 0x12, 0x04, 0x6e, 0xc5, 0x6c, 0x35, 0x8b, 0xe7, 0x16, 0xbb, 0x00, 0xa2, 0xae, 0x66, 0xc0, 0x5f, + 0x1b, 0x60, 0x52, 0x66, 0x47, 0xcd, 0xb8, 0x22, 0x77, 0xdb, 0x5b, 0x7d, 0x74, 0x5b, 0x16, 0x42, + 0x78, 0x4f, 0x7d, 0x65, 0xec, 0xe0, 0xa3, 0x4e, 0xab, 0xe0, 0xef, 0x0c, 0x30, 0x66, 0xe3, 0x10, + 0xfb, 0x36, 0xf6, 0x6b, 0xcc, 0xcc, 0xb9, 0xe3, 0xf6, 0x15, 0xb2, 0x66, 0x2e, 0x6b, 0xda, 0x85, + 0x85, 0x65, 
0x69, 0xe1, 0x98, 0xce, 0x3a, 0x6c, 0x16, 0xa7, 0xd3, 0xa1, 0x3a, 0x07, 0xb5, 0x19, + 0x08, 0x7f, 0x6c, 0x80, 0xf1, 0xd4, 0xed, 0xe2, 0x80, 0xb8, 0x74, 0x32, 0x0b, 0xcf, 0x4b, 0xd0, + 0xc5, 0x76, 0x2c, 0x94, 0x05, 0x87, 0xbf, 0x31, 0x58, 0xb5, 0x95, 0xdc, 0xd5, 0xa8, 0x59, 0xe2, + 0x1e, 0x7c, 0xbd, 0x9f, 0x1e, 0x54, 0xca, 0x85, 0x03, 0xaf, 0xa6, 0x95, 0x9c, 0xe2, 0x1c, 0x36, + 0x8b, 0x53, 0xba, 0xff, 0x14, 0x03, 0xe9, 0xc6, 0xc1, 0x77, 0x0d, 0x30, 0x86, 0xd3, 0x82, 0x99, + 0x9a, 0x97, 0x8f, 0xeb, 0xba, 0xae, 0xe5, 0xb7, 0xb8, 0x4e, 0x6b, 0x2c, 0x8a, 0xda, 0x60, 0x59, + 0xed, 0x87, 0xf7, 0x2d, 0x2f, 0x74, 0xb1, 0xf9, 0xff, 0xfd, 0xab, 0xfd, 0x56, 0x84, 0x4a, 0x94, + 0xe8, 0x86, 0x57, 0x41, 0xc1, 0x8f, 0x5d, 0xd7, 0xda, 0x72, 0xb1, 0xf9, 0x38, 0xaf, 0x22, 0x54, + 0x7f, 0xf1, 0xb6, 0xa4, 0x23, 0x25, 0x01, 0xeb, 0x60, 0x6e, 0xff, 0xa6, 0x7a, 0x7c, 0xd1, 0xb5, + 0x81, 0x67, 0x5e, 0xe1, 0x5a, 0x66, 0x5a, 0xcd, 0xe2, 0xf4, 0x66, 0xf7, 0x16, 0xdf, 0x43, 0x75, + 0xc0, 0x37, 0xc0, 0x63, 0x9a, 0xcc, 0x8a, 0xb7, 0x85, 0x6d, 0x1b, 0xdb, 0xc9, 0x45, 0xcb, 0xfc, + 0x02, 0x87, 0x50, 0xfb, 0x78, 0x33, 0x2b, 0x80, 0x1e, 0x34, 0x1a, 0xde, 0x02, 0xd3, 0x1a, 0x7b, + 0xd5, 0x8f, 0xd6, 0x49, 0x35, 0x22, 0x8e, 0xdf, 0x30, 0xe7, 0xb9, 0xde, 0x73, 0xc9, 0xee, 0xdb, + 0xd4, 0x78, 0xa8, 0xc7, 0x18, 0xf8, 0x72, 0x9b, 0x36, 0xfe, 0xe1, 0xc2, 0x0a, 0x6f, 0xe2, 0x03, + 0x6a, 0x3e, 0xc1, 0x8b, 0x0b, 0xbe, 0xce, 0x9b, 0x1a, 0x1d, 0xf5, 0x90, 0x87, 0xdf, 0x00, 0x67, + 0x33, 0x1c, 0x76, 0xaf, 0x30, 0x9f, 0x14, 0x17, 0x04, 0x56, 0x89, 0x6e, 0x26, 0x44, 0xd4, 0x4d, + 0x12, 0x7e, 0x15, 0x40, 0x8d, 0xbc, 0x66, 0x85, 0x7c, 0xfc, 0x53, 0xe2, 0xae, 0xc2, 0x56, 0x74, + 0x53, 0xd2, 0x50, 0x17, 0x39, 0xf8, 0x81, 0xd1, 0x36, 0x93, 0xf4, 0x36, 0x4b, 0xcd, 0xab, 0x7c, + 0xc3, 0xbe, 0x7c, 0xf4, 0x00, 0x4c, 0x95, 0xa1, 0xd8, 0xc5, 0x9a, 0x87, 0x35, 0x14, 0xd4, 0x03, + 0x7d, 0x86, 0x5d, 0xa6, 0x33, 0x39, 0x1c, 0x4e, 0x80, 0x81, 0x1d, 0x2c, 0x3f, 0x1b, 0x23, 0xf6, + 0x13, 0xbe, 0x09, 0xf2, 0xbb, 0x96, 0x1b, 0x27, 0xad, 0x80, 0xfe, 0x9d, 0xf5, 0x48, 0xe8, 0x7d, + 0x31, 0x77, 0xdd, 0x98, 0x79, 0xdf, 0x00, 0xd3, 0xdd, 0x4f, 0x95, 0xcf, 0xcb, 0xa2, 0x9f, 0x19, + 0x60, 0xb2, 0xe3, 0x00, 0xe9, 0x62, 0x8c, 0xdb, 0x6e, 0xcc, 0xbd, 0x3e, 0x9e, 0x04, 0x62, 0x23, + 0xf0, 0x8a, 0x56, 0xb7, 0xec, 0x87, 0x06, 0x98, 0xc8, 0x26, 0xe6, 0xcf, 0xc9, 0x4b, 0xa5, 0xf7, + 0x72, 0x60, 0xba, 0x7b, 0x0d, 0x0e, 0x3d, 0xd5, 0x5d, 0xe8, 0x7b, 0x83, 0xa6, 0x5b, 0xcb, 0xf6, + 0x1d, 0x03, 0x8c, 0xbe, 0xad, 0xe4, 0x92, 0xaf, 0x99, 0xfd, 0xec, 0x0a, 0x25, 0x47, 0x5f, 0xca, + 0xa0, 0x48, 0x87, 0x2c, 0xfd, 0xd6, 0x00, 0x53, 0x5d, 0x8f, 0x73, 0x78, 0x05, 0x0c, 0x59, 0xae, + 0x1b, 0xec, 0x89, 0x6e, 0x9e, 0xd6, 0x96, 0x5f, 0xe4, 0x54, 0x24, 0xb9, 0x9a, 0xcf, 0x72, 0x9f, + 0x81, 0xcf, 0x4a, 0x7f, 0x30, 0xc0, 0x85, 0x07, 0x45, 0xdd, 0x67, 0xbd, 0x86, 0xf3, 0xa0, 0x20, + 0x8b, 0xed, 0x03, 0xbe, 0x7e, 0x32, 0xbb, 0xca, 0x8c, 0xc0, 0x5f, 0xcb, 0x88, 0x5f, 0xa5, 0x5f, + 0x1a, 0x60, 0xa2, 0x8a, 0xc9, 0xae, 0x53, 0xc3, 0x08, 0xd7, 0x31, 0xc1, 0x7e, 0x0d, 0xc3, 0x05, + 0x30, 0xc2, 0xbf, 0x36, 0x86, 0x56, 0x2d, 0xf9, 0x46, 0x32, 0x29, 0x1d, 0x3d, 0x72, 0x3b, 0x61, + 0xa0, 0x54, 0x46, 0x7d, 0x4f, 0xc9, 0xf5, 0xfc, 0x9e, 0x72, 0x01, 0x0c, 0x86, 0x69, 0x03, 0xb8, + 0xc0, 0xb8, 0xbc, 0xe7, 0xcb, 0xa9, 0x9c, 0x1b, 0x90, 0x88, 0x77, 0xb9, 0xf2, 0x92, 0x1b, 0x90, + 0x08, 0x71, 0x6a, 0xe9, 0x4f, 0x39, 0x70, 0xa6, 0x3d, 0x3f, 0x33, 0x40, 0x12, 0xbb, 0x1d, 0x1f, + 0x70, 0x18, 0x0f, 0x71, 0x8e, 0xfe, 0x6e, 0x20, 0xf7, 0xe0, 0x77, 0x03, 0xf0, 0x25, 0x30, 0x29, + 0x7f, 0xae, 0xec, 0x87, 0x04, 0x53, 
0xfe, 0x65, 0x72, 0xa0, 0xfd, 0xbd, 0xdf, 0x5a, 0x56, 0x00, + 0x75, 0x8e, 0x81, 0x5f, 0xc9, 0xbc, 0x69, 0xb8, 0x9c, 0xbe, 0x67, 0x60, 0xb5, 0x1d, 0x2f, 0x1d, + 0xee, 0xb1, 0x2d, 0xbf, 0x42, 0x48, 0x40, 0x32, 0x0f, 0x1d, 0x16, 0xc0, 0x48, 0x9d, 0x09, 0xf0, + 0x3e, 0x79, 0xbe, 0xdd, 0xe9, 0x37, 0x12, 0x06, 0x4a, 0x65, 0xe0, 0xd7, 0xc0, 0x78, 0x10, 0x8a, + 0x2a, 0x76, 0xdd, 0xb5, 0xab, 0xd8, 0xad, 0xf3, 0x8e, 0x5e, 0x21, 0x69, 0xbb, 0xb6, 0xb1, 0x50, + 0x56, 0xb6, 0xf4, 0x47, 0x03, 0x9c, 0x4d, 0x1e, 0x13, 0xb9, 0x0e, 0xf6, 0xa3, 0xa5, 0xc0, 0xaf, + 0x3b, 0x0d, 0x78, 0x5e, 0xb4, 0x4f, 0xb5, 0x9e, 0x64, 0xd2, 0x3a, 0x85, 0xf7, 0xc1, 0x30, 0x15, + 0xb1, 0x22, 0xc3, 0xf8, 0x95, 0xa3, 0x87, 0x71, 0x36, 0xe8, 0x44, 0xf5, 0x97, 0x50, 0x13, 0x1c, + 0x16, 0xc9, 0x35, 0xab, 0x12, 0xfb, 0xb6, 0x6c, 0xa1, 0x8f, 0x89, 0x48, 0x5e, 0x5a, 0x14, 0x34, + 0xa4, 0xb8, 0xa5, 0x7f, 0x18, 0x60, 0xb2, 0xe3, 0x71, 0x14, 0xfc, 0x9e, 0x01, 0xc6, 0x6a, 0xda, + 0xf4, 0x64, 0x3e, 0x58, 0x3b, 0xfe, 0x03, 0x2c, 0x4d, 0xa9, 0x28, 0xa1, 0x74, 0x0a, 0x6a, 0x03, + 0x85, 0x9b, 0xc0, 0xac, 0x65, 0xde, 0x21, 0x66, 0xbe, 0x6c, 0x5e, 0x68, 0x35, 0x8b, 0xe6, 0x52, + 0x0f, 0x19, 0xd4, 0x73, 0x74, 0xe5, 0x5b, 0x1f, 0x7d, 0x3a, 0x7b, 0xea, 0xe3, 0x4f, 0x67, 0x4f, + 0x7d, 0xf2, 0xe9, 0xec, 0xa9, 0x77, 0x5a, 0xb3, 0xc6, 0x47, 0xad, 0x59, 0xe3, 0xe3, 0xd6, 0xac, + 0xf1, 0x49, 0x6b, 0xd6, 0xf8, 0x6b, 0x6b, 0xd6, 0xf8, 0xc9, 0xdf, 0x66, 0x4f, 0xbd, 0x7e, 0xfd, + 0xa8, 0xaf, 0x8f, 0xff, 0x17, 0x00, 0x00, 0xff, 0xff, 0x28, 0x77, 0xf5, 0x22, 0xd1, 0x2c, 0x00, + 0x00, } func (m *ConversionRequest) Marshal() (dAtA []byte, err error) { @@ -2633,6 +2635,16 @@ func (m *ValidationRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.OptionalOldSelf != nil { + i-- + if *m.OptionalOldSelf { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } i -= len(m.FieldPath) copy(dAtA[i:], m.FieldPath) i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldPath))) @@ -3367,6 +3379,9 @@ func (m *ValidationRule) Size() (n int) { } l = len(m.FieldPath) n += 1 + l + sovGenerated(uint64(l)) + if m.OptionalOldSelf != nil { + n += 2 + } return n } @@ -3845,6 +3860,7 @@ func (this *ValidationRule) String() string { `MessageExpression:` + fmt.Sprintf("%v", this.MessageExpression) + `,`, `Reason:` + valueToStringGenerated(this.Reason) + `,`, `FieldPath:` + fmt.Sprintf("%v", this.FieldPath) + `,`, + `OptionalOldSelf:` + valueToStringGenerated(this.OptionalOldSelf) + `,`, `}`, }, "") return s @@ -9008,6 +9024,27 @@ func (m *ValidationRule) Unmarshal(dAtA []byte) error { } m.FieldPath = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OptionalOldSelf", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.OptionalOldSelf = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto index 578d018a7b..3c39d63a5f 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto @@ -658,6 +658,18 @@ message ValidationRule { // - 
'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values // are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with // non-intersecting keys are appended, retaining their partial order. + // + // If `rule` makes use of the `oldSelf` variable it is implicitly a + // `transition rule`. + // + // By default, the `oldSelf` variable is the same type as `self`. + // When `optionalOldSelf` is true, the `oldSelf` variable is a CEL optional + // variable whose value() is the same type as `self`. + // See the documentation for the `optionalOldSelf` field for details. + // + // Transition rules by default are applied only on UPDATE requests and are + // skipped if an old value could not be found. You can opt a transition + // rule into unconditional evaluation by setting `optionalOldSelf` to true. optional string rule = 1; // Message represents the message displayed when validation fails. The message is required if the Rule contains @@ -698,6 +710,24 @@ message ValidationRule { // e.g. for attribute `foo.34$` appears in a list `testList`, the fieldPath could be set to `.testList['foo.34$']` // +optional optional string fieldPath = 5; + + // optionalOldSelf is used to opt a transition rule into evaluation + // even when the object is first created, or if the old object is + // missing the value. + // + // When enabled `oldSelf` will be a CEL optional whose value will be + // `None` if there is no old value, or when the object is initially created. + // + // You may check for presence of oldSelf using `oldSelf.hasValue()` and + // unwrap it after checking using `oldSelf.value()`. Check the CEL + // documentation for Optional types for more information: + // https://pkg.go.dev/github.com/google/cel-go/cel#OptionalTypes + // + // May not be set unless `oldSelf` is used in `rule`. + // + // +featureGate=CRDValidationRatcheting + // +optional + optional bool optionalOldSelf = 6; } // WebhookClientConfig contains the information to make a TLS connection with the webhook. diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go index 1c90d464a9..a81451ad6e 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go @@ -249,6 +249,19 @@ type ValidationRule struct { // - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values // are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with // non-intersecting keys are appended, retaining their partial order. + // + // If `rule` makes use of the `oldSelf` variable it is implicitly a + // `transition rule`. + // + // By default, the `oldSelf` variable is the same type as `self`. + // When `optionalOldSelf` is true, the `oldSelf` variable is a CEL optional + // variable whose value() is the same type as `self`. + // See the documentation for the `optionalOldSelf` field for details. + // + // Transition rules by default are applied only on UPDATE requests and are + // skipped if an old value could not be found. You can opt a transition + // rule into unconditional evaluation by setting `optionalOldSelf` to true. + // Rule string `json:"rule" protobuf:"bytes,1,opt,name=rule"` // Message represents the message displayed when validation fails. 
The message is required if the Rule contains // line breaks. The message must not contain line breaks. @@ -285,6 +298,24 @@ type ValidationRule struct { // e.g. for attribute `foo.34$` appears in a list `testList`, the fieldPath could be set to `.testList['foo.34$']` // +optional FieldPath string `json:"fieldPath,omitempty" protobuf:"bytes,5,opt,name=fieldPath"` + + // optionalOldSelf is used to opt a transition rule into evaluation + // even when the object is first created, or if the old object is + // missing the value. + // + // When enabled `oldSelf` will be a CEL optional whose value will be + // `None` if there is no old value, or when the object is initially created. + // + // You may check for presence of oldSelf using `oldSelf.hasValue()` and + // unwrap it after checking using `oldSelf.value()`. Check the CEL + // documentation for Optional types for more information: + // https://pkg.go.dev/github.com/google/cel-go/cel#OptionalTypes + // + // May not be set unless `oldSelf` is used in `rule`. + // + // +featureGate=CRDValidationRatcheting + // +optional + OptionalOldSelf *bool `json:"optionalOldSelf,omitempty" protobuf:"bytes,6,opt,name=optionalOldSelf"` } // JSON represents any valid JSON value. diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.conversion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.conversion.go index 0a82e4d8c3..405021bf38 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.conversion.go @@ -1261,6 +1261,7 @@ func autoConvert_v1_ValidationRule_To_apiextensions_ValidationRule(in *Validatio out.MessageExpression = in.MessageExpression out.Reason = (*apiextensions.FieldValueErrorReason)(unsafe.Pointer(in.Reason)) out.FieldPath = in.FieldPath + out.OptionalOldSelf = (*bool)(unsafe.Pointer(in.OptionalOldSelf)) return nil } @@ -1275,6 +1276,7 @@ func autoConvert_apiextensions_ValidationRule_To_v1_ValidationRule(in *apiextens out.MessageExpression = in.MessageExpression out.Reason = (*FieldValueErrorReason)(unsafe.Pointer(in.Reason)) out.FieldPath = in.FieldPath + out.OptionalOldSelf = (*bool)(unsafe.Pointer(in.OptionalOldSelf)) return nil } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.deepcopy.go index b4347b8db1..bc23fcd86f 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.deepcopy.go @@ -619,6 +619,11 @@ func (in *ValidationRule) DeepCopyInto(out *ValidationRule) { *out = new(FieldValueErrorReason) **out = **in } + if in.OptionalOldSelf != nil { + in, out := &in.OptionalOldSelf, &out.OptionalOldSelf + *out = new(bool) + **out = **in + } return } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go index 981f6adbdb..c81fa6bc31 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go @@ -785,204 +785,206 @@ func init() { } var fileDescriptor_98a4cc6918394e53 = []byte{ - // 3144 bytes of a gzipped FileDescriptorProto + // 3170 bytes 
of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0xcd, 0x73, 0x1c, 0x47, - 0xd9, 0xf7, 0xec, 0x6a, 0xa5, 0x55, 0x4b, 0xb6, 0xa5, 0xb6, 0xa5, 0x8c, 0x15, 0x47, 0x2b, 0xaf, - 0xdf, 0xf8, 0x55, 0x12, 0x67, 0x95, 0xf8, 0x4d, 0xde, 0x84, 0x40, 0x8a, 0xd2, 0x4a, 0x72, 0x50, - 0x62, 0x59, 0xa2, 0xd7, 0x76, 0x04, 0xf9, 0x1c, 0xed, 0xf4, 0xae, 0x26, 0x9a, 0x9d, 0x19, 0x77, - 0xcf, 0xac, 0xa4, 0x0a, 0x50, 0x7c, 0x54, 0x0a, 0x8a, 0x02, 0x42, 0x91, 0x5c, 0x28, 0xe0, 0x10, - 0x28, 0x2e, 0x1c, 0xe0, 0x00, 0x37, 0xf8, 0x03, 0x72, 0x4c, 0x51, 0x1c, 0x72, 0xa0, 0x16, 0xb2, - 0x5c, 0x39, 0x52, 0x45, 0x95, 0x4e, 0x54, 0x7f, 0x4c, 0x4f, 0xef, 0xec, 0xae, 0xed, 0x8a, 0x76, - 0x63, 0x6e, 0xbb, 0xcf, 0xd7, 0xef, 0x99, 0xa7, 0x9f, 0x7e, 0xfa, 0xe9, 0x67, 0x06, 0xd4, 0xf6, - 0x9e, 0xa5, 0x25, 0xc7, 0x5f, 0xda, 0x8b, 0x76, 0x30, 0xf1, 0x70, 0x88, 0xe9, 0x52, 0x13, 0x7b, - 0xb6, 0x4f, 0x96, 0x24, 0xc3, 0x0a, 0x1c, 0x7c, 0x10, 0x62, 0x8f, 0x3a, 0xbe, 0x47, 0x1f, 0xb7, - 0x02, 0x87, 0x62, 0xd2, 0xc4, 0x64, 0x29, 0xd8, 0xab, 0x33, 0x1e, 0xed, 0x14, 0x58, 0x6a, 0x3e, - 0xb9, 0x83, 0x43, 0xeb, 0xc9, 0xa5, 0x3a, 0xf6, 0x30, 0xb1, 0x42, 0x6c, 0x97, 0x02, 0xe2, 0x87, - 0x3e, 0x7c, 0x5e, 0x98, 0x2b, 0x75, 0x48, 0xbf, 0xa1, 0xcc, 0x95, 0x82, 0xbd, 0x3a, 0xe3, 0xd1, - 0x4e, 0x81, 0x92, 0x34, 0x37, 0xf7, 0x78, 0xdd, 0x09, 0x77, 0xa3, 0x9d, 0x52, 0xd5, 0x6f, 0x2c, - 0xd5, 0xfd, 0xba, 0xbf, 0xc4, 0xad, 0xee, 0x44, 0x35, 0xfe, 0x8f, 0xff, 0xe1, 0xbf, 0x04, 0xda, - 0xdc, 0x53, 0x89, 0xf3, 0x0d, 0xab, 0xba, 0xeb, 0x78, 0x98, 0x1c, 0x26, 0x1e, 0x37, 0x70, 0x68, - 0x2d, 0x35, 0xbb, 0x7c, 0x9c, 0x5b, 0xea, 0xa7, 0x45, 0x22, 0x2f, 0x74, 0x1a, 0xb8, 0x4b, 0xe1, - 0xff, 0xef, 0xa6, 0x40, 0xab, 0xbb, 0xb8, 0x61, 0xa5, 0xf5, 0x8a, 0x47, 0x06, 0x98, 0x5e, 0xf1, - 0xbd, 0x26, 0x26, 0xec, 0x29, 0x11, 0xbe, 0x1d, 0x61, 0x1a, 0xc2, 0x32, 0xc8, 0x46, 0x8e, 0x6d, - 0x1a, 0x0b, 0xc6, 0xe2, 0x78, 0xf9, 0x89, 0x0f, 0x5b, 0x85, 0x13, 0xed, 0x56, 0x21, 0x7b, 0x73, - 0x7d, 0xf5, 0xa8, 0x55, 0xb8, 0xd0, 0x0f, 0x29, 0x3c, 0x0c, 0x30, 0x2d, 0xdd, 0x5c, 0x5f, 0x45, - 0x4c, 0x19, 0xbe, 0x00, 0xa6, 0x6d, 0x4c, 0x1d, 0x82, 0xed, 0xe5, 0xad, 0xf5, 0x5b, 0xc2, 0xbe, - 0x99, 0xe1, 0x16, 0xcf, 0x49, 0x8b, 0xd3, 0xab, 0x69, 0x01, 0xd4, 0xad, 0x03, 0xb7, 0xc1, 0x98, - 0xbf, 0xf3, 0x16, 0xae, 0x86, 0xd4, 0xcc, 0x2e, 0x64, 0x17, 0x27, 0xae, 0x3c, 0x5e, 0x4a, 0x56, - 0x50, 0xb9, 0xc0, 0x97, 0x4d, 0x3e, 0x6c, 0x09, 0x59, 0xfb, 0x6b, 0xf1, 0xca, 0x95, 0x4f, 0x4b, - 0xb4, 0xb1, 0x4d, 0x61, 0x05, 0xc5, 0xe6, 0x8a, 0xbf, 0xca, 0x00, 0xa8, 0x3f, 0x3c, 0x0d, 0x7c, - 0x8f, 0xe2, 0x81, 0x3c, 0x3d, 0x05, 0x53, 0x55, 0x6e, 0x39, 0xc4, 0xb6, 0xc4, 0x35, 0x33, 0x9f, - 0xc6, 0x7b, 0x53, 0xe2, 0x4f, 0xad, 0xa4, 0xcc, 0xa1, 0x2e, 0x00, 0x78, 0x03, 0x8c, 0x12, 0x4c, - 0x23, 0x37, 0x34, 0xb3, 0x0b, 0xc6, 0xe2, 0xc4, 0x95, 0xcb, 0x7d, 0xa1, 0x78, 0x7e, 0xb3, 0xe4, - 0x2b, 0x35, 0x9f, 0x2c, 0x55, 0x42, 0x2b, 0x8c, 0x68, 0xf9, 0x94, 0x44, 0x1a, 0x45, 0xdc, 0x06, - 0x92, 0xb6, 0x8a, 0xdf, 0xcb, 0x80, 0x29, 0x3d, 0x4a, 0x4d, 0x07, 0xef, 0xc3, 0x7d, 0x30, 0x46, - 0x44, 0xb2, 0xf0, 0x38, 0x4d, 0x5c, 0xd9, 0x2a, 0x1d, 0x6b, 0x5b, 0x95, 0xba, 0x92, 0xb0, 0x3c, - 0xc1, 0xd6, 0x4c, 0xfe, 0x41, 0x31, 0x1a, 0x7c, 0x1b, 0xe4, 0x89, 0x5c, 0x28, 0x9e, 0x4d, 0x13, - 0x57, 0xbe, 0x3c, 0x40, 0x64, 0x61, 0xb8, 0x3c, 0xd9, 0x6e, 0x15, 0xf2, 0xf1, 0x3f, 0xa4, 0x00, - 0x8b, 0xef, 0x65, 0xc0, 0xfc, 0x4a, 0x44, 0x43, 0xbf, 0x81, 0x30, 0xf5, 0x23, 0x52, 0xc5, 0x2b, - 0xbe, 0x1b, 0x35, 0xbc, 0x55, 0x5c, 0x73, 0x3c, 0x27, 0x64, 0xd9, 0xba, 0x00, 0x46, 0x3c, 
0xab, - 0x81, 0x65, 0xf6, 0x4c, 0xca, 0x98, 0x8e, 0x5c, 0xb7, 0x1a, 0x18, 0x71, 0x0e, 0x93, 0x60, 0xc9, - 0x22, 0xf7, 0x82, 0x92, 0xb8, 0x71, 0x18, 0x60, 0xc4, 0x39, 0xf0, 0x12, 0x18, 0xad, 0xf9, 0xa4, - 0x61, 0x89, 0x75, 0x1c, 0x4f, 0x56, 0xe6, 0x2a, 0xa7, 0x22, 0xc9, 0x85, 0x4f, 0x83, 0x09, 0x1b, - 0xd3, 0x2a, 0x71, 0x02, 0x06, 0x6d, 0x8e, 0x70, 0xe1, 0x33, 0x52, 0x78, 0x62, 0x35, 0x61, 0x21, - 0x5d, 0x0e, 0x5e, 0x06, 0xf9, 0x80, 0x38, 0x3e, 0x71, 0xc2, 0x43, 0x33, 0xb7, 0x60, 0x2c, 0xe6, - 0xca, 0x53, 0x52, 0x27, 0xbf, 0x25, 0xe9, 0x48, 0x49, 0xc0, 0x05, 0x90, 0x7f, 0xb1, 0xb2, 0x79, - 0x7d, 0xcb, 0x0a, 0x77, 0xcd, 0x51, 0x8e, 0x30, 0xc2, 0xa4, 0x91, 0xa2, 0x16, 0xff, 0x9a, 0x01, - 0x66, 0x3a, 0x2a, 0x71, 0x48, 0xe1, 0x55, 0x90, 0xa7, 0x21, 0xab, 0x38, 0xf5, 0x43, 0x19, 0x93, - 0x47, 0x63, 0xb0, 0x8a, 0xa4, 0x1f, 0xb5, 0x0a, 0xb3, 0x89, 0x46, 0x4c, 0xe5, 0xf1, 0x50, 0xba, - 0xf0, 0x17, 0x06, 0x38, 0xb3, 0x8f, 0x77, 0x76, 0x7d, 0x7f, 0x6f, 0xc5, 0x75, 0xb0, 0x17, 0xae, - 0xf8, 0x5e, 0xcd, 0xa9, 0xcb, 0x1c, 0x40, 0xc7, 0xcc, 0x81, 0x97, 0xbb, 0x2d, 0x97, 0x1f, 0x68, - 0xb7, 0x0a, 0x67, 0x7a, 0x30, 0x50, 0x2f, 0x3f, 0xe0, 0x36, 0x30, 0xab, 0xa9, 0x4d, 0x22, 0x0b, - 0x98, 0x28, 0x5b, 0xe3, 0xe5, 0xf3, 0xed, 0x56, 0xc1, 0x5c, 0xe9, 0x23, 0x83, 0xfa, 0x6a, 0x17, - 0xbf, 0x93, 0x4d, 0x87, 0x57, 0x4b, 0xb7, 0x37, 0x41, 0x9e, 0x6d, 0x63, 0xdb, 0x0a, 0x2d, 0xb9, - 0x11, 0x9f, 0xb8, 0xb7, 0x4d, 0x2f, 0x6a, 0xc6, 0x06, 0x0e, 0xad, 0x32, 0x94, 0x0b, 0x02, 0x12, - 0x1a, 0x52, 0x56, 0xe1, 0xd7, 0xc1, 0x08, 0x0d, 0x70, 0x55, 0x06, 0xfa, 0x95, 0xe3, 0x6e, 0xb6, - 0x3e, 0x0f, 0x52, 0x09, 0x70, 0x35, 0xd9, 0x0b, 0xec, 0x1f, 0xe2, 0xb0, 0xf0, 0x1d, 0x03, 0x8c, - 0x52, 0x5e, 0xa0, 0x64, 0x51, 0x7b, 0x6d, 0x58, 0x1e, 0xa4, 0xaa, 0xa0, 0xf8, 0x8f, 0x24, 0x78, - 0xf1, 0x5f, 0x19, 0x70, 0xa1, 0x9f, 0xea, 0x8a, 0xef, 0xd9, 0x62, 0x39, 0xd6, 0xe5, 0xde, 0x16, - 0x99, 0xfe, 0xb4, 0xbe, 0xb7, 0x8f, 0x5a, 0x85, 0x87, 0xef, 0x6a, 0x40, 0x2b, 0x02, 0x9f, 0x53, - 0xcf, 0x2d, 0x0a, 0xc5, 0x85, 0x4e, 0xc7, 0x8e, 0x5a, 0x85, 0xd3, 0x4a, 0xad, 0xd3, 0x57, 0xd8, - 0x04, 0xd0, 0xb5, 0x68, 0x78, 0x83, 0x58, 0x1e, 0x15, 0x66, 0x9d, 0x06, 0x96, 0xe1, 0x7b, 0xf4, - 0xde, 0xd2, 0x83, 0x69, 0x94, 0xe7, 0x24, 0x24, 0xbc, 0xd6, 0x65, 0x0d, 0xf5, 0x40, 0x60, 0x75, - 0x8b, 0x60, 0x8b, 0xaa, 0x52, 0xa4, 0x9d, 0x28, 0x8c, 0x8a, 0x24, 0x17, 0x3e, 0x02, 0xc6, 0x1a, - 0x98, 0x52, 0xab, 0x8e, 0x79, 0xfd, 0x19, 0x4f, 0x8e, 0xe8, 0x0d, 0x41, 0x46, 0x31, 0x9f, 0xf5, - 0x27, 0xe7, 0xfb, 0x45, 0xed, 0x9a, 0x43, 0x43, 0xf8, 0x6a, 0xd7, 0x06, 0x28, 0xdd, 0xdb, 0x13, - 0x32, 0x6d, 0x9e, 0xfe, 0xaa, 0xf8, 0xc5, 0x14, 0x2d, 0xf9, 0xbf, 0x06, 0x72, 0x4e, 0x88, 0x1b, - 0xf1, 0xd9, 0xfd, 0xf2, 0x90, 0x72, 0xaf, 0x7c, 0x52, 0xfa, 0x90, 0x5b, 0x67, 0x68, 0x48, 0x80, - 0x16, 0x7f, 0x9d, 0x01, 0x0f, 0xf5, 0x53, 0x61, 0x07, 0x0a, 0x65, 0x11, 0x0f, 0xdc, 0x88, 0x58, - 0xae, 0xcc, 0x38, 0x15, 0xf1, 0x2d, 0x4e, 0x45, 0x92, 0xcb, 0x4a, 0x3e, 0x75, 0xbc, 0x7a, 0xe4, - 0x5a, 0x44, 0xa6, 0x93, 0x7a, 0xea, 0x8a, 0xa4, 0x23, 0x25, 0x01, 0x4b, 0x00, 0xd0, 0x5d, 0x9f, - 0x84, 0x1c, 0x43, 0x56, 0xaf, 0x53, 0xac, 0x40, 0x54, 0x14, 0x15, 0x69, 0x12, 0xec, 0x44, 0xdb, - 0x73, 0x3c, 0x5b, 0xae, 0xba, 0xda, 0xc5, 0x2f, 0x39, 0x9e, 0x8d, 0x38, 0x87, 0xe1, 0xbb, 0x0e, - 0x0d, 0x19, 0x45, 0x2e, 0x79, 0x47, 0xd4, 0xb9, 0xa4, 0x92, 0x60, 0xf8, 0x55, 0x56, 0xf5, 0x7d, - 0xe2, 0x60, 0x6a, 0x8e, 0x26, 0xf8, 0x2b, 0x8a, 0x8a, 0x34, 0x89, 0xe2, 0x3f, 0xf3, 0xfd, 0x93, - 0x84, 0x95, 0x12, 0x78, 0x11, 0xe4, 0xea, 0xc4, 0x8f, 0x02, 0x19, 0x25, 0x15, 0xed, 0x17, 0x18, - 0x11, 0x09, 0x1e, 
0xcb, 0xca, 0x66, 0x47, 0x9b, 0xaa, 0xb2, 0x32, 0x6e, 0x4e, 0x63, 0x3e, 0xfc, - 0x96, 0x01, 0x72, 0x9e, 0x0c, 0x0e, 0x4b, 0xb9, 0x57, 0x87, 0x94, 0x17, 0x3c, 0xbc, 0x89, 0xbb, - 0x22, 0xf2, 0x02, 0x19, 0x3e, 0x05, 0x72, 0xb4, 0xea, 0x07, 0x58, 0x46, 0x7d, 0x3e, 0x16, 0xaa, - 0x30, 0xe2, 0x51, 0xab, 0x70, 0x32, 0x36, 0xc7, 0x09, 0x48, 0x08, 0xc3, 0xef, 0x1a, 0x00, 0x34, - 0x2d, 0xd7, 0xb1, 0x2d, 0xde, 0x32, 0xe4, 0xb8, 0xfb, 0x83, 0x4d, 0xeb, 0x5b, 0xca, 0xbc, 0x58, - 0xb4, 0xe4, 0x3f, 0xd2, 0xa0, 0xe1, 0xbb, 0x06, 0x98, 0xa4, 0xd1, 0x0e, 0x91, 0x5a, 0x94, 0x37, - 0x17, 0x13, 0x57, 0xbe, 0x32, 0x50, 0x5f, 0x2a, 0x1a, 0x40, 0x79, 0xaa, 0xdd, 0x2a, 0x4c, 0xea, - 0x14, 0xd4, 0xe1, 0x00, 0xfc, 0x81, 0x01, 0xf2, 0xcd, 0xf8, 0xcc, 0x1e, 0xe3, 0x1b, 0xfe, 0xf5, - 0x21, 0x2d, 0xac, 0xcc, 0xa8, 0x64, 0x17, 0xa8, 0x3e, 0x40, 0x79, 0x00, 0xff, 0x68, 0x00, 0xd3, - 0xb2, 0x45, 0x81, 0xb7, 0xdc, 0x2d, 0xe2, 0x78, 0x21, 0x26, 0xa2, 0xdf, 0xa4, 0x66, 0x9e, 0xbb, - 0x37, 0xd8, 0xb3, 0x30, 0xdd, 0xcb, 0x96, 0x17, 0xa4, 0x77, 0xe6, 0x72, 0x1f, 0x37, 0x50, 0x5f, - 0x07, 0x79, 0xa2, 0x25, 0x2d, 0x8d, 0x39, 0x3e, 0x84, 0x44, 0x4b, 0x7a, 0x29, 0x59, 0x1d, 0x92, - 0x0e, 0x4a, 0x83, 0x86, 0x9b, 0x60, 0x26, 0x20, 0x98, 0x03, 0xdc, 0xf4, 0xf6, 0x3c, 0x7f, 0xdf, - 0xbb, 0xea, 0x60, 0xd7, 0xa6, 0x26, 0x58, 0x30, 0x16, 0xf3, 0xe5, 0x73, 0xed, 0x56, 0x61, 0x66, - 0xab, 0x97, 0x00, 0xea, 0xad, 0x57, 0x7c, 0x37, 0x9b, 0xbe, 0x05, 0xa4, 0xbb, 0x08, 0xf8, 0xbe, - 0x78, 0x7a, 0x11, 0x1b, 0x6a, 0x1a, 0x7c, 0xb5, 0xde, 0x1c, 0x52, 0x32, 0xa9, 0x36, 0x20, 0xe9, - 0xe4, 0x14, 0x89, 0x22, 0xcd, 0x0f, 0xf8, 0x53, 0x03, 0x9c, 0xb4, 0xaa, 0x55, 0x1c, 0x84, 0xd8, - 0x16, 0xc5, 0x3d, 0xf3, 0x19, 0xd4, 0xaf, 0x19, 0xe9, 0xd5, 0xc9, 0x65, 0x1d, 0x1a, 0x75, 0x7a, - 0x02, 0x9f, 0x03, 0xa7, 0x68, 0xe8, 0x13, 0x6c, 0xa7, 0xda, 0x66, 0xd8, 0x6e, 0x15, 0x4e, 0x55, - 0x3a, 0x38, 0x28, 0x25, 0x59, 0xfc, 0x5b, 0x0e, 0x14, 0xee, 0xb2, 0xd5, 0xee, 0xe1, 0x62, 0x76, - 0x09, 0x8c, 0xf2, 0xc7, 0xb5, 0x79, 0x54, 0xf2, 0x5a, 0x2b, 0xc8, 0xa9, 0x48, 0x72, 0xd9, 0x41, - 0xc1, 0xf0, 0x59, 0xfb, 0x92, 0xe5, 0x82, 0xea, 0xa0, 0xa8, 0x08, 0x32, 0x8a, 0xf9, 0xf0, 0x0a, - 0x00, 0x36, 0x0e, 0x08, 0x66, 0x87, 0x95, 0x6d, 0x8e, 0x71, 0x69, 0xb5, 0x48, 0xab, 0x8a, 0x83, - 0x34, 0x29, 0x78, 0x15, 0xc0, 0xf8, 0x9f, 0xe3, 0x7b, 0x2f, 0x5b, 0xc4, 0x73, 0xbc, 0xba, 0x99, - 0xe7, 0x6e, 0xcf, 0xb2, 0x6e, 0x6c, 0xb5, 0x8b, 0x8b, 0x7a, 0x68, 0xc0, 0xb7, 0xc1, 0xa8, 0x18, - 0xfa, 0xf0, 0x13, 0x62, 0x88, 0x55, 0x1e, 0xf0, 0x18, 0x71, 0x28, 0x24, 0x21, 0xbb, 0xab, 0x7b, - 0xee, 0x7e, 0x57, 0xf7, 0x3b, 0x96, 0xd3, 0xd1, 0xff, 0xf2, 0x72, 0x5a, 0xfc, 0xb7, 0x91, 0xae, - 0x39, 0xda, 0xa3, 0x56, 0xaa, 0x96, 0x8b, 0xe1, 0x2a, 0x98, 0x62, 0x37, 0x26, 0x84, 0x03, 0xd7, - 0xa9, 0x5a, 0x94, 0x5f, 0xd8, 0x45, 0xb2, 0xab, 0x19, 0x52, 0x25, 0xc5, 0x47, 0x5d, 0x1a, 0xf0, - 0x45, 0x00, 0xc5, 0x2d, 0xa2, 0xc3, 0x8e, 0x68, 0x88, 0xd4, 0x7d, 0xa0, 0xd2, 0x25, 0x81, 0x7a, - 0x68, 0xc1, 0x15, 0x30, 0xed, 0x5a, 0x3b, 0xd8, 0xad, 0x60, 0x17, 0x57, 0x43, 0x9f, 0x70, 0x53, - 0x62, 0xa4, 0x31, 0xd3, 0x6e, 0x15, 0xa6, 0xaf, 0xa5, 0x99, 0xa8, 0x5b, 0xbe, 0x78, 0x21, 0xbd, - 0xb5, 0xf5, 0x07, 0x17, 0x77, 0xb3, 0x0f, 0x32, 0x60, 0xae, 0x7f, 0x66, 0xc0, 0x6f, 0x27, 0x57, - 0x48, 0x71, 0x43, 0x78, 0x7d, 0x58, 0x59, 0x28, 0xef, 0x90, 0xa0, 0xfb, 0xfe, 0x08, 0xbf, 0xc1, - 0xda, 0x35, 0xcb, 0x8d, 0x87, 0x56, 0xaf, 0x0d, 0xcd, 0x05, 0x06, 0x52, 0x1e, 0x17, 0x9d, 0xa0, - 0xe5, 0xf2, 0xc6, 0xcf, 0x72, 0x71, 0xf1, 0x37, 0x46, 0x7a, 0x8a, 0x90, 0xec, 0x60, 0xf8, 0x43, - 0x03, 0x9c, 0xf6, 0x03, 0xec, 0x2d, 0x6f, 
0xad, 0xdf, 0xfa, 0x3f, 0xb1, 0x93, 0x65, 0xa8, 0xae, - 0x1f, 0xd3, 0xcf, 0x17, 0x2b, 0x9b, 0xd7, 0x85, 0xc1, 0x2d, 0xe2, 0x07, 0xb4, 0x7c, 0xa6, 0xdd, - 0x2a, 0x9c, 0xde, 0xec, 0x84, 0x42, 0x69, 0xec, 0x62, 0x03, 0xcc, 0xac, 0x1d, 0x84, 0x98, 0x78, - 0x96, 0xbb, 0xea, 0x57, 0xa3, 0x06, 0xf6, 0x42, 0xe1, 0x68, 0x6a, 0xe2, 0x65, 0xdc, 0xe3, 0xc4, - 0xeb, 0x21, 0x90, 0x8d, 0x88, 0x2b, 0xb3, 0x78, 0x42, 0x4d, 0x74, 0xd1, 0x35, 0xc4, 0xe8, 0xc5, - 0x0b, 0x60, 0x84, 0xf9, 0x09, 0xcf, 0x81, 0x2c, 0xb1, 0xf6, 0xb9, 0xd5, 0xc9, 0xf2, 0x18, 0x13, - 0x41, 0xd6, 0x3e, 0x62, 0xb4, 0xe2, 0x5f, 0x2e, 0x80, 0xd3, 0xa9, 0x67, 0x81, 0x73, 0x20, 0xa3, - 0xc6, 0xc4, 0x40, 0x1a, 0xcd, 0xac, 0xaf, 0xa2, 0x8c, 0x63, 0xc3, 0x67, 0x54, 0xf1, 0x15, 0xa0, - 0x05, 0x75, 0x96, 0x70, 0x2a, 0xeb, 0xcf, 0x13, 0x73, 0xcc, 0x91, 0xb8, 0x70, 0x32, 0x1f, 0x70, - 0x4d, 0xee, 0x12, 0xe1, 0x03, 0xae, 0x21, 0x46, 0xfb, 0xb4, 0xe3, 0xbe, 0x78, 0xde, 0x98, 0xbb, - 0x87, 0x79, 0xe3, 0xe8, 0x1d, 0xe7, 0x8d, 0x17, 0x41, 0x2e, 0x74, 0x42, 0x17, 0xf3, 0x83, 0x4c, - 0xbb, 0x46, 0xdd, 0x60, 0x44, 0x24, 0x78, 0xf0, 0x2d, 0x30, 0x66, 0xe3, 0x9a, 0x15, 0xb9, 0x21, - 0x3f, 0xb3, 0x26, 0xae, 0xac, 0x0c, 0x20, 0x85, 0xc4, 0x30, 0x78, 0x55, 0xd8, 0x45, 0x31, 0x00, - 0x7c, 0x18, 0x8c, 0x35, 0xac, 0x03, 0xa7, 0x11, 0x35, 0x78, 0x83, 0x69, 0x08, 0xb1, 0x0d, 0x41, - 0x42, 0x31, 0x8f, 0x55, 0x46, 0x7c, 0x50, 0x75, 0x23, 0xea, 0x34, 0xb1, 0x64, 0xca, 0xe6, 0x4f, - 0x55, 0xc6, 0xb5, 0x14, 0x1f, 0x75, 0x69, 0x70, 0x30, 0xc7, 0xe3, 0xca, 0x13, 0x1a, 0x98, 0x20, - 0xa1, 0x98, 0xd7, 0x09, 0x26, 0xe5, 0x27, 0xfb, 0x81, 0x49, 0xe5, 0x2e, 0x0d, 0xf8, 0x18, 0x18, - 0x6f, 0x58, 0x07, 0xd7, 0xb0, 0x57, 0x0f, 0x77, 0xcd, 0x93, 0x0b, 0xc6, 0x62, 0xb6, 0x7c, 0xb2, - 0xdd, 0x2a, 0x8c, 0x6f, 0xc4, 0x44, 0x94, 0xf0, 0xb9, 0xb0, 0xe3, 0x49, 0xe1, 0x53, 0x9a, 0x70, - 0x4c, 0x44, 0x09, 0x9f, 0x75, 0x2f, 0x81, 0x15, 0xb2, 0xcd, 0x65, 0x9e, 0xee, 0xbc, 0xe6, 0x6e, - 0x09, 0x32, 0x8a, 0xf9, 0x70, 0x11, 0xe4, 0x1b, 0xd6, 0x01, 0x1f, 0x49, 0x98, 0x53, 0xdc, 0x2c, - 0x1f, 0x8c, 0x6f, 0x48, 0x1a, 0x52, 0x5c, 0x2e, 0xe9, 0x78, 0x42, 0x72, 0x5a, 0x93, 0x94, 0x34, - 0xa4, 0xb8, 0x2c, 0x89, 0x23, 0xcf, 0xb9, 0x1d, 0x61, 0x21, 0x0c, 0x79, 0x64, 0x54, 0x12, 0xdf, - 0x4c, 0x58, 0x48, 0x97, 0x83, 0x25, 0x00, 0x1a, 0x91, 0x1b, 0x3a, 0x81, 0x8b, 0x37, 0x6b, 0xe6, - 0x19, 0x1e, 0x7f, 0xde, 0xf4, 0x6f, 0x28, 0x2a, 0xd2, 0x24, 0x20, 0x06, 0x23, 0xd8, 0x8b, 0x1a, - 0xe6, 0x59, 0x7e, 0xb0, 0x0f, 0x24, 0x05, 0xd5, 0xce, 0x59, 0xf3, 0xa2, 0x06, 0xe2, 0xe6, 0xe1, - 0x33, 0xe0, 0x64, 0xc3, 0x3a, 0x60, 0xe5, 0x00, 0x93, 0xd0, 0xc1, 0xd4, 0x9c, 0xe1, 0x0f, 0x3f, - 0xcd, 0xba, 0xdd, 0x0d, 0x9d, 0x81, 0x3a, 0xe5, 0xb8, 0xa2, 0xe3, 0x69, 0x8a, 0xb3, 0x9a, 0xa2, - 0xce, 0x40, 0x9d, 0x72, 0x2c, 0xd2, 0x04, 0xdf, 0x8e, 0x1c, 0x82, 0x6d, 0xf3, 0x01, 0xde, 0x20, - 0xcb, 0x97, 0x15, 0x82, 0x86, 0x14, 0x17, 0x36, 0xe3, 0xd9, 0x95, 0xc9, 0xb7, 0xe1, 0xcd, 0xc1, - 0x56, 0xf2, 0x4d, 0xb2, 0x4c, 0x88, 0x75, 0x28, 0x4e, 0x1a, 0x7d, 0x6a, 0x05, 0x29, 0xc8, 0x59, - 0xae, 0xbb, 0x59, 0x33, 0xcf, 0xf1, 0xd8, 0x0f, 0xfa, 0x04, 0x51, 0x55, 0x67, 0x99, 0x81, 0x20, - 0x81, 0xc5, 0x40, 0x7d, 0x8f, 0xa5, 0xc6, 0xdc, 0x70, 0x41, 0x37, 0x19, 0x08, 0x12, 0x58, 0xfc, - 0x49, 0xbd, 0xc3, 0xcd, 0x9a, 0xf9, 0xe0, 0x90, 0x9f, 0x94, 0x81, 0x20, 0x81, 0x05, 0x1d, 0x90, - 0xf5, 0xfc, 0xd0, 0x3c, 0x3f, 0x94, 0xe3, 0x99, 0x1f, 0x38, 0xd7, 0xfd, 0x10, 0x31, 0x0c, 0xf8, - 0x13, 0x03, 0x80, 0x20, 0x49, 0xd1, 0x87, 0x06, 0x32, 0x12, 0x49, 0x41, 0x96, 0x92, 0xdc, 0x5e, - 0xf3, 0x42, 0x72, 0x98, 0x5c, 0x8f, 0xb4, 0x3d, 0xa0, 0x79, 0x01, 
0x7f, 0x69, 0x80, 0xb3, 0x7a, - 0x9b, 0xac, 0xdc, 0x9b, 0xe7, 0x11, 0xb9, 0x31, 0xe8, 0x34, 0x2f, 0xfb, 0xbe, 0x5b, 0x36, 0xdb, - 0xad, 0xc2, 0xd9, 0xe5, 0x1e, 0xa8, 0xa8, 0xa7, 0x2f, 0xf0, 0xb7, 0x06, 0x98, 0x96, 0x55, 0x54, - 0xf3, 0xb0, 0xc0, 0x03, 0x88, 0x07, 0x1d, 0xc0, 0x34, 0x8e, 0x88, 0xa3, 0x7a, 0xc9, 0xde, 0xc5, - 0x47, 0xdd, 0xae, 0xc1, 0x3f, 0x18, 0x60, 0xd2, 0xc6, 0x01, 0xf6, 0x6c, 0xec, 0x55, 0x99, 0xaf, - 0x0b, 0x03, 0x19, 0x59, 0xa4, 0x7d, 0x5d, 0xd5, 0x20, 0x84, 0x9b, 0x25, 0xe9, 0xe6, 0xa4, 0xce, - 0x3a, 0x6a, 0x15, 0x66, 0x13, 0x55, 0x9d, 0x83, 0x3a, 0xbc, 0x84, 0xef, 0x19, 0xe0, 0x74, 0xb2, - 0x00, 0xe2, 0x48, 0xb9, 0x30, 0xc4, 0x3c, 0xe0, 0xed, 0xeb, 0x72, 0x27, 0x20, 0x4a, 0x7b, 0x00, - 0x7f, 0x67, 0xb0, 0x4e, 0x2d, 0xbe, 0xf7, 0x51, 0xb3, 0xc8, 0x63, 0xf9, 0xc6, 0xc0, 0x63, 0xa9, - 0x10, 0x44, 0x28, 0x2f, 0x27, 0xad, 0xa0, 0xe2, 0x1c, 0xb5, 0x0a, 0x33, 0x7a, 0x24, 0x15, 0x03, - 0xe9, 0x1e, 0xc2, 0xef, 0x1b, 0x60, 0x12, 0x27, 0x1d, 0x37, 0x35, 0x2f, 0x0e, 0x24, 0x88, 0x3d, - 0x9b, 0x78, 0x71, 0x53, 0xd7, 0x58, 0x14, 0x75, 0x60, 0xb3, 0x0e, 0x12, 0x1f, 0x58, 0x8d, 0xc0, - 0xc5, 0xe6, 0xff, 0x0c, 0xb8, 0x83, 0x5c, 0x13, 0x76, 0x51, 0x0c, 0x00, 0x2f, 0x83, 0xbc, 0x17, - 0xb9, 0xae, 0xb5, 0xe3, 0x62, 0xf3, 0x61, 0xde, 0x8b, 0xa8, 0x91, 0xec, 0x75, 0x49, 0x47, 0x4a, - 0x02, 0xd6, 0xc0, 0xc2, 0xc1, 0x4b, 0xea, 0xf3, 0xa4, 0x9e, 0x43, 0x43, 0xf3, 0x12, 0xb7, 0x32, - 0xd7, 0x6e, 0x15, 0x66, 0xb7, 0x7b, 0x8f, 0x15, 0xef, 0x6a, 0x03, 0xbe, 0x02, 0x1e, 0xd4, 0x64, - 0xd6, 0x1a, 0x3b, 0xd8, 0xb6, 0xb1, 0x1d, 0x5f, 0xdc, 0xcc, 0xff, 0x15, 0x83, 0xcb, 0x78, 0x83, - 0x6f, 0xa7, 0x05, 0xd0, 0x9d, 0xb4, 0xe1, 0x35, 0x30, 0xab, 0xb1, 0xd7, 0xbd, 0x70, 0x93, 0x54, - 0x42, 0xe2, 0x78, 0x75, 0x73, 0x91, 0xdb, 0x3d, 0x1b, 0xef, 0xc8, 0x6d, 0x8d, 0x87, 0xfa, 0xe8, - 0xc0, 0x2f, 0x75, 0x58, 0xe3, 0xaf, 0xd0, 0xac, 0xe0, 0x25, 0x7c, 0x48, 0xcd, 0x47, 0x78, 0x77, - 0xc2, 0x17, 0x7b, 0x5b, 0xa3, 0xa3, 0x3e, 0xf2, 0xf0, 0x8b, 0xe0, 0x4c, 0x8a, 0xc3, 0xae, 0x28, - 0xe6, 0xa3, 0xe2, 0xae, 0xc1, 0xfa, 0xd9, 0xed, 0x98, 0x88, 0x7a, 0x49, 0xc2, 0x2f, 0x00, 0xa8, - 0x91, 0x37, 0xac, 0x80, 0xeb, 0x3f, 0x26, 0xae, 0x3d, 0x6c, 0x45, 0xb7, 0x25, 0x0d, 0xf5, 0x90, - 0x83, 0x3f, 0x33, 0x3a, 0x9e, 0x24, 0xb9, 0x1d, 0x53, 0xf3, 0x32, 0xdf, 0xbf, 0x1b, 0xc7, 0xcc, - 0x42, 0xed, 0x3d, 0x48, 0xe4, 0x62, 0x2d, 0xcc, 0x1a, 0x14, 0xea, 0xe3, 0xc2, 0x1c, 0xbb, 0xa1, - 0xa7, 0x2a, 0x3c, 0x9c, 0x02, 0xd9, 0x3d, 0x2c, 0xbf, 0xaa, 0x40, 0xec, 0x27, 0xb4, 0x41, 0xae, - 0x69, 0xb9, 0x51, 0x3c, 0x64, 0x18, 0x70, 0x77, 0x80, 0x84, 0xf1, 0xe7, 0x32, 0xcf, 0x1a, 0x73, - 0xef, 0x1b, 0x60, 0xb6, 0xf7, 0xc1, 0x73, 0x5f, 0xdd, 0xfa, 0xb9, 0x01, 0xa6, 0xbb, 0xce, 0x98, - 0x1e, 0x1e, 0xdd, 0xee, 0xf4, 0xe8, 0x95, 0x41, 0x1f, 0x16, 0x62, 0x73, 0xf0, 0x0e, 0x59, 0x77, - 0xef, 0x47, 0x06, 0x98, 0x4a, 0x97, 0xed, 0xfb, 0x19, 0xaf, 0xe2, 0xfb, 0x19, 0x30, 0xdb, 0xbb, - 0xb1, 0x87, 0x44, 0x4d, 0x30, 0x86, 0x33, 0x09, 0xea, 0x35, 0x35, 0x7e, 0xc7, 0x00, 0x13, 0x6f, - 0x29, 0xb9, 0xf8, 0xad, 0xfb, 0xc0, 0x67, 0x50, 0xf1, 0x39, 0x99, 0x30, 0x28, 0xd2, 0x71, 0x8b, - 0xbf, 0x37, 0xc0, 0x4c, 0xcf, 0x06, 0x00, 0x5e, 0x02, 0xa3, 0x96, 0xeb, 0xfa, 0xfb, 0x62, 0x94, - 0xa8, 0xbd, 0x23, 0x58, 0xe6, 0x54, 0x24, 0xb9, 0x5a, 0xf4, 0x32, 0x9f, 0x55, 0xf4, 0x8a, 0x7f, - 0x32, 0xc0, 0xf9, 0x3b, 0x65, 0xe2, 0x7d, 0x59, 0xd2, 0x45, 0x90, 0x97, 0xcd, 0xfb, 0x21, 0x5f, - 0x4e, 0x59, 0x8a, 0x65, 0xd1, 0xe0, 0x1f, 0x9a, 0x89, 0x5f, 0xc5, 0x0f, 0x0c, 0x30, 0x55, 0xc1, - 0xa4, 0xe9, 0x54, 0x31, 0xc2, 0x35, 0x4c, 0xb0, 0x57, 0xc5, 0x70, 0x09, 0x8c, 0xf3, 0xd7, 
0xdd, - 0x81, 0x55, 0x8d, 0x5f, 0xdd, 0x4c, 0xcb, 0x90, 0x8f, 0x5f, 0x8f, 0x19, 0x28, 0x91, 0x51, 0xaf, - 0x79, 0x32, 0x7d, 0x5f, 0xf3, 0x9c, 0x07, 0x23, 0x41, 0x32, 0x88, 0xce, 0x33, 0x2e, 0x9f, 0x3d, - 0x73, 0x2a, 0xe7, 0xfa, 0x24, 0xe4, 0xd3, 0xb5, 0x9c, 0xe4, 0xfa, 0x24, 0x44, 0x9c, 0xca, 0xf6, - 0xcb, 0xa9, 0xce, 0x3a, 0xce, 0x00, 0x49, 0xe4, 0x76, 0xbd, 0x57, 0x62, 0x3c, 0xc4, 0x39, 0xfa, - 0xe7, 0x2e, 0x99, 0x3b, 0x7f, 0xee, 0x02, 0x5f, 0x00, 0xd3, 0xf2, 0xe7, 0xda, 0x41, 0x40, 0x30, - 0xe5, 0xef, 0x4e, 0xb3, 0x9d, 0x1f, 0xcd, 0x6e, 0xa4, 0x05, 0x50, 0xb7, 0x0e, 0xfc, 0x7c, 0xea, - 0x53, 0x9c, 0x8b, 0xc9, 0x67, 0x38, 0xac, 0x25, 0xe4, 0x7d, 0xc6, 0x2d, 0x56, 0x06, 0xd6, 0x08, - 0xf1, 0x49, 0xea, 0xfb, 0x9c, 0x25, 0x30, 0x5e, 0x63, 0x02, 0x7c, 0x5e, 0x9f, 0xeb, 0x0c, 0xfa, - 0xd5, 0x98, 0x81, 0x12, 0x99, 0xe2, 0x9f, 0x0d, 0xd0, 0xeb, 0x4b, 0x39, 0x78, 0x4e, 0xcc, 0x5d, - 0xb5, 0x61, 0x66, 0x3c, 0x73, 0x85, 0x4d, 0x30, 0x46, 0xc5, 0x62, 0xcb, 0x64, 0xdc, 0x3c, 0x66, - 0x32, 0xa6, 0x53, 0x47, 0x34, 0x7c, 0x31, 0x35, 0x06, 0x63, 0xf9, 0x58, 0xb5, 0xca, 0x91, 0x67, - 0xcb, 0x51, 0xfc, 0xa4, 0xc8, 0xc7, 0x95, 0x65, 0x41, 0x43, 0x8a, 0x5b, 0xae, 0x7e, 0xf8, 0xc9, - 0xfc, 0x89, 0x8f, 0x3e, 0x99, 0x3f, 0xf1, 0xf1, 0x27, 0xf3, 0x27, 0xbe, 0xd9, 0x9e, 0x37, 0x3e, - 0x6c, 0xcf, 0x1b, 0x1f, 0xb5, 0xe7, 0x8d, 0x8f, 0xdb, 0xf3, 0xc6, 0xdf, 0xdb, 0xf3, 0xc6, 0x8f, - 0xff, 0x31, 0x7f, 0xe2, 0xab, 0xcf, 0x1f, 0xeb, 0xe3, 0xf4, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, - 0xde, 0xe9, 0x15, 0xbf, 0xf5, 0x2e, 0x00, 0x00, + 0xd9, 0xf7, 0xec, 0x6a, 0xa5, 0x55, 0x4b, 0xb6, 0xa4, 0xb6, 0xa5, 0x8c, 0x15, 0x47, 0x2b, 0xaf, + 0xdf, 0xf8, 0x55, 0x12, 0x67, 0x95, 0xf8, 0x4d, 0xde, 0xe4, 0xcd, 0x4b, 0x8a, 0xd2, 0x4a, 0x72, + 0x50, 0x62, 0x59, 0xa2, 0xd7, 0x76, 0x04, 0xf9, 0x1c, 0xed, 0xf4, 0xae, 0x27, 0x9a, 0x9d, 0x19, + 0x77, 0xcf, 0xac, 0xa4, 0x0a, 0x50, 0x7c, 0x54, 0x0a, 0x8a, 0x02, 0x42, 0x91, 0x5c, 0x28, 0xe0, + 0x10, 0x28, 0x2e, 0x1c, 0xe0, 0x00, 0x37, 0xf8, 0x03, 0x72, 0x4c, 0x01, 0x87, 0x1c, 0xa8, 0x85, + 0x2c, 0x57, 0x8e, 0x54, 0x51, 0xa5, 0x13, 0xd5, 0x1f, 0xd3, 0xd3, 0x3b, 0xbb, 0x6b, 0xbb, 0xa2, + 0xdd, 0x98, 0xdb, 0xee, 0xf3, 0xf5, 0x7b, 0xe6, 0xe9, 0xa7, 0x9f, 0x7e, 0xfa, 0x99, 0x01, 0xb5, + 0xbd, 0x67, 0x69, 0xc9, 0xf1, 0x97, 0xf7, 0xa2, 0x5d, 0x4c, 0x3c, 0x1c, 0x62, 0xba, 0xdc, 0xc4, + 0x9e, 0xed, 0x93, 0x65, 0xc9, 0xb0, 0x02, 0x07, 0x1f, 0x84, 0xd8, 0xa3, 0x8e, 0xef, 0xd1, 0xc7, + 0xad, 0xc0, 0xa1, 0x98, 0x34, 0x31, 0x59, 0x0e, 0xf6, 0xea, 0x8c, 0x47, 0x3b, 0x05, 0x96, 0x9b, + 0x4f, 0xee, 0xe2, 0xd0, 0x7a, 0x72, 0xb9, 0x8e, 0x3d, 0x4c, 0xac, 0x10, 0xdb, 0xa5, 0x80, 0xf8, + 0xa1, 0x0f, 0x9f, 0x17, 0xe6, 0x4a, 0x1d, 0xd2, 0x6f, 0x28, 0x73, 0xa5, 0x60, 0xaf, 0xce, 0x78, + 0xb4, 0x53, 0xa0, 0x24, 0xcd, 0xcd, 0x3f, 0x5e, 0x77, 0xc2, 0x5b, 0xd1, 0x6e, 0xa9, 0xea, 0x37, + 0x96, 0xeb, 0x7e, 0xdd, 0x5f, 0xe6, 0x56, 0x77, 0xa3, 0x1a, 0xff, 0xc7, 0xff, 0xf0, 0x5f, 0x02, + 0x6d, 0xfe, 0xa9, 0xc4, 0xf9, 0x86, 0x55, 0xbd, 0xe5, 0x78, 0x98, 0x1c, 0x26, 0x1e, 0x37, 0x70, + 0x68, 0x2d, 0x37, 0xbb, 0x7c, 0x9c, 0x5f, 0xee, 0xa7, 0x45, 0x22, 0x2f, 0x74, 0x1a, 0xb8, 0x4b, + 0xe1, 0x7f, 0xef, 0xa6, 0x40, 0xab, 0xb7, 0x70, 0xc3, 0x4a, 0xeb, 0x15, 0x8f, 0x0c, 0x30, 0xb3, + 0xea, 0x7b, 0x4d, 0x4c, 0xd8, 0x53, 0x22, 0x7c, 0x3b, 0xc2, 0x34, 0x84, 0x65, 0x90, 0x8d, 0x1c, + 0xdb, 0x34, 0x16, 0x8d, 0xa5, 0xf1, 0xf2, 0x13, 0x1f, 0xb6, 0x0a, 0x27, 0xda, 0xad, 0x42, 0xf6, + 0xc6, 0xc6, 0xda, 0x51, 0xab, 0x70, 0xbe, 0x1f, 0x52, 0x78, 0x18, 0x60, 0x5a, 0xba, 0xb1, 0xb1, + 0x86, 0x98, 0x32, 0x7c, 0x01, 0xcc, 0xd8, 0x98, 0x3a, 0x04, 0xdb, 
0x2b, 0xdb, 0x1b, 0x37, 0x85, + 0x7d, 0x33, 0xc3, 0x2d, 0x9e, 0x95, 0x16, 0x67, 0xd6, 0xd2, 0x02, 0xa8, 0x5b, 0x07, 0xee, 0x80, + 0x31, 0x7f, 0xf7, 0x2d, 0x5c, 0x0d, 0xa9, 0x99, 0x5d, 0xcc, 0x2e, 0x4d, 0x5c, 0x7e, 0xbc, 0x94, + 0xac, 0xa0, 0x72, 0x81, 0x2f, 0x9b, 0x7c, 0xd8, 0x12, 0xb2, 0xf6, 0xd7, 0xe3, 0x95, 0x2b, 0x4f, + 0x49, 0xb4, 0xb1, 0x2d, 0x61, 0x05, 0xc5, 0xe6, 0x8a, 0xbf, 0xc8, 0x00, 0xa8, 0x3f, 0x3c, 0x0d, + 0x7c, 0x8f, 0xe2, 0x81, 0x3c, 0x3d, 0x05, 0xd3, 0x55, 0x6e, 0x39, 0xc4, 0xb6, 0xc4, 0x35, 0x33, + 0x9f, 0xc6, 0x7b, 0x53, 0xe2, 0x4f, 0xaf, 0xa6, 0xcc, 0xa1, 0x2e, 0x00, 0x78, 0x1d, 0x8c, 0x12, + 0x4c, 0x23, 0x37, 0x34, 0xb3, 0x8b, 0xc6, 0xd2, 0xc4, 0xe5, 0x4b, 0x7d, 0xa1, 0x78, 0x7e, 0xb3, + 0xe4, 0x2b, 0x35, 0x9f, 0x2c, 0x55, 0x42, 0x2b, 0x8c, 0x68, 0xf9, 0x94, 0x44, 0x1a, 0x45, 0xdc, + 0x06, 0x92, 0xb6, 0x8a, 0xdf, 0xc9, 0x80, 0x69, 0x3d, 0x4a, 0x4d, 0x07, 0xef, 0xc3, 0x7d, 0x30, + 0x46, 0x44, 0xb2, 0xf0, 0x38, 0x4d, 0x5c, 0xde, 0x2e, 0x1d, 0x6b, 0x5b, 0x95, 0xba, 0x92, 0xb0, + 0x3c, 0xc1, 0xd6, 0x4c, 0xfe, 0x41, 0x31, 0x1a, 0x7c, 0x1b, 0xe4, 0x89, 0x5c, 0x28, 0x9e, 0x4d, + 0x13, 0x97, 0xbf, 0x38, 0x40, 0x64, 0x61, 0xb8, 0x3c, 0xd9, 0x6e, 0x15, 0xf2, 0xf1, 0x3f, 0xa4, + 0x00, 0x8b, 0xef, 0x65, 0xc0, 0xc2, 0x6a, 0x44, 0x43, 0xbf, 0x81, 0x30, 0xf5, 0x23, 0x52, 0xc5, + 0xab, 0xbe, 0x1b, 0x35, 0xbc, 0x35, 0x5c, 0x73, 0x3c, 0x27, 0x64, 0xd9, 0xba, 0x08, 0x46, 0x3c, + 0xab, 0x81, 0x65, 0xf6, 0x4c, 0xca, 0x98, 0x8e, 0x5c, 0xb3, 0x1a, 0x18, 0x71, 0x0e, 0x93, 0x60, + 0xc9, 0x22, 0xf7, 0x82, 0x92, 0xb8, 0x7e, 0x18, 0x60, 0xc4, 0x39, 0xf0, 0x22, 0x18, 0xad, 0xf9, + 0xa4, 0x61, 0x89, 0x75, 0x1c, 0x4f, 0x56, 0xe6, 0x0a, 0xa7, 0x22, 0xc9, 0x85, 0x4f, 0x83, 0x09, + 0x1b, 0xd3, 0x2a, 0x71, 0x02, 0x06, 0x6d, 0x8e, 0x70, 0xe1, 0xd3, 0x52, 0x78, 0x62, 0x2d, 0x61, + 0x21, 0x5d, 0x0e, 0x5e, 0x02, 0xf9, 0x80, 0x38, 0x3e, 0x71, 0xc2, 0x43, 0x33, 0xb7, 0x68, 0x2c, + 0xe5, 0xca, 0xd3, 0x52, 0x27, 0xbf, 0x2d, 0xe9, 0x48, 0x49, 0xc0, 0x45, 0x90, 0x7f, 0xb1, 0xb2, + 0x75, 0x6d, 0xdb, 0x0a, 0x6f, 0x99, 0xa3, 0x1c, 0x61, 0x84, 0x49, 0x23, 0x45, 0x2d, 0xfe, 0x25, + 0x03, 0xcc, 0x74, 0x54, 0xe2, 0x90, 0xc2, 0x2b, 0x20, 0x4f, 0x43, 0x56, 0x71, 0xea, 0x87, 0x32, + 0x26, 0x8f, 0xc6, 0x60, 0x15, 0x49, 0x3f, 0x6a, 0x15, 0xe6, 0x12, 0x8d, 0x98, 0xca, 0xe3, 0xa1, + 0x74, 0xe1, 0xcf, 0x0c, 0x70, 0x7a, 0x1f, 0xef, 0xde, 0xf2, 0xfd, 0xbd, 0x55, 0xd7, 0xc1, 0x5e, + 0xb8, 0xea, 0x7b, 0x35, 0xa7, 0x2e, 0x73, 0x00, 0x1d, 0x33, 0x07, 0x5e, 0xee, 0xb6, 0x5c, 0x7e, + 0xa0, 0xdd, 0x2a, 0x9c, 0xee, 0xc1, 0x40, 0xbd, 0xfc, 0x80, 0x3b, 0xc0, 0xac, 0xa6, 0x36, 0x89, + 0x2c, 0x60, 0xa2, 0x6c, 0x8d, 0x97, 0xcf, 0xb5, 0x5b, 0x05, 0x73, 0xb5, 0x8f, 0x0c, 0xea, 0xab, + 0x5d, 0xfc, 0x56, 0x36, 0x1d, 0x5e, 0x2d, 0xdd, 0xde, 0x04, 0x79, 0xb6, 0x8d, 0x6d, 0x2b, 0xb4, + 0xe4, 0x46, 0x7c, 0xe2, 0xde, 0x36, 0xbd, 0xa8, 0x19, 0x9b, 0x38, 0xb4, 0xca, 0x50, 0x2e, 0x08, + 0x48, 0x68, 0x48, 0x59, 0x85, 0x5f, 0x05, 0x23, 0x34, 0xc0, 0x55, 0x19, 0xe8, 0x57, 0x8e, 0xbb, + 0xd9, 0xfa, 0x3c, 0x48, 0x25, 0xc0, 0xd5, 0x64, 0x2f, 0xb0, 0x7f, 0x88, 0xc3, 0xc2, 0x77, 0x0c, + 0x30, 0x4a, 0x79, 0x81, 0x92, 0x45, 0xed, 0xb5, 0x61, 0x79, 0x90, 0xaa, 0x82, 0xe2, 0x3f, 0x92, + 0xe0, 0xc5, 0x7f, 0x66, 0xc0, 0xf9, 0x7e, 0xaa, 0xab, 0xbe, 0x67, 0x8b, 0xe5, 0xd8, 0x90, 0x7b, + 0x5b, 0x64, 0xfa, 0xd3, 0xfa, 0xde, 0x3e, 0x6a, 0x15, 0x1e, 0xbe, 0xab, 0x01, 0xad, 0x08, 0xfc, + 0x9f, 0x7a, 0x6e, 0x51, 0x28, 0xce, 0x77, 0x3a, 0x76, 0xd4, 0x2a, 0x4c, 0x29, 0xb5, 0x4e, 0x5f, + 0x61, 0x13, 0x40, 0xd7, 0xa2, 0xe1, 0x75, 0x62, 0x79, 0x54, 0x98, 0x75, 0x1a, 0x58, 0x86, 
0xef, + 0xd1, 0x7b, 0x4b, 0x0f, 0xa6, 0x51, 0x9e, 0x97, 0x90, 0xf0, 0x6a, 0x97, 0x35, 0xd4, 0x03, 0x81, + 0xd5, 0x2d, 0x82, 0x2d, 0xaa, 0x4a, 0x91, 0x76, 0xa2, 0x30, 0x2a, 0x92, 0x5c, 0xf8, 0x08, 0x18, + 0x6b, 0x60, 0x4a, 0xad, 0x3a, 0xe6, 0xf5, 0x67, 0x3c, 0x39, 0xa2, 0x37, 0x05, 0x19, 0xc5, 0x7c, + 0xd6, 0x9f, 0x9c, 0xeb, 0x17, 0xb5, 0xab, 0x0e, 0x0d, 0xe1, 0xab, 0x5d, 0x1b, 0xa0, 0x74, 0x6f, + 0x4f, 0xc8, 0xb4, 0x79, 0xfa, 0xab, 0xe2, 0x17, 0x53, 0xb4, 0xe4, 0xff, 0x0a, 0xc8, 0x39, 0x21, + 0x6e, 0xc4, 0x67, 0xf7, 0xcb, 0x43, 0xca, 0xbd, 0xf2, 0x49, 0xe9, 0x43, 0x6e, 0x83, 0xa1, 0x21, + 0x01, 0x5a, 0xfc, 0x65, 0x06, 0x3c, 0xd4, 0x4f, 0x85, 0x1d, 0x28, 0x94, 0x45, 0x3c, 0x70, 0x23, + 0x62, 0xb9, 0x32, 0xe3, 0x54, 0xc4, 0xb7, 0x39, 0x15, 0x49, 0x2e, 0x2b, 0xf9, 0xd4, 0xf1, 0xea, + 0x91, 0x6b, 0x11, 0x99, 0x4e, 0xea, 0xa9, 0x2b, 0x92, 0x8e, 0x94, 0x04, 0x2c, 0x01, 0x40, 0x6f, + 0xf9, 0x24, 0xe4, 0x18, 0xb2, 0x7a, 0x9d, 0x62, 0x05, 0xa2, 0xa2, 0xa8, 0x48, 0x93, 0x60, 0x27, + 0xda, 0x9e, 0xe3, 0xd9, 0x72, 0xd5, 0xd5, 0x2e, 0x7e, 0xc9, 0xf1, 0x6c, 0xc4, 0x39, 0x0c, 0xdf, + 0x75, 0x68, 0xc8, 0x28, 0x72, 0xc9, 0x3b, 0xa2, 0xce, 0x25, 0x95, 0x04, 0xc3, 0xaf, 0xb2, 0xaa, + 0xef, 0x13, 0x07, 0x53, 0x73, 0x34, 0xc1, 0x5f, 0x55, 0x54, 0xa4, 0x49, 0x14, 0xff, 0x91, 0xef, + 0x9f, 0x24, 0xac, 0x94, 0xc0, 0x0b, 0x20, 0x57, 0x27, 0x7e, 0x14, 0xc8, 0x28, 0xa9, 0x68, 0xbf, + 0xc0, 0x88, 0x48, 0xf0, 0x58, 0x56, 0x36, 0x3b, 0xda, 0x54, 0x95, 0x95, 0x71, 0x73, 0x1a, 0xf3, + 0xe1, 0x37, 0x0c, 0x90, 0xf3, 0x64, 0x70, 0x58, 0xca, 0xbd, 0x3a, 0xa4, 0xbc, 0xe0, 0xe1, 0x4d, + 0xdc, 0x15, 0x91, 0x17, 0xc8, 0xf0, 0x29, 0x90, 0xa3, 0x55, 0x3f, 0xc0, 0x32, 0xea, 0x0b, 0xb1, + 0x50, 0x85, 0x11, 0x8f, 0x5a, 0x85, 0x93, 0xb1, 0x39, 0x4e, 0x40, 0x42, 0x18, 0x7e, 0xdb, 0x00, + 0xa0, 0x69, 0xb9, 0x8e, 0x6d, 0xf1, 0x96, 0x21, 0xc7, 0xdd, 0x1f, 0x6c, 0x5a, 0xdf, 0x54, 0xe6, + 0xc5, 0xa2, 0x25, 0xff, 0x91, 0x06, 0x0d, 0xdf, 0x35, 0xc0, 0x24, 0x8d, 0x76, 0x89, 0xd4, 0xa2, + 0xbc, 0xb9, 0x98, 0xb8, 0xfc, 0xa5, 0x81, 0xfa, 0x52, 0xd1, 0x00, 0xca, 0xd3, 0xed, 0x56, 0x61, + 0x52, 0xa7, 0xa0, 0x0e, 0x07, 0xe0, 0xf7, 0x0c, 0x90, 0x6f, 0xc6, 0x67, 0xf6, 0x18, 0xdf, 0xf0, + 0xaf, 0x0f, 0x69, 0x61, 0x65, 0x46, 0x25, 0xbb, 0x40, 0xf5, 0x01, 0xca, 0x03, 0xf8, 0x7b, 0x03, + 0x98, 0x96, 0x2d, 0x0a, 0xbc, 0xe5, 0x6e, 0x13, 0xc7, 0x0b, 0x31, 0x11, 0xfd, 0x26, 0x35, 0xf3, + 0xdc, 0xbd, 0xc1, 0x9e, 0x85, 0xe9, 0x5e, 0xb6, 0xbc, 0x28, 0xbd, 0x33, 0x57, 0xfa, 0xb8, 0x81, + 0xfa, 0x3a, 0xc8, 0x13, 0x2d, 0x69, 0x69, 0xcc, 0xf1, 0x21, 0x24, 0x5a, 0xd2, 0x4b, 0xc9, 0xea, + 0x90, 0x74, 0x50, 0x1a, 0x34, 0xdc, 0x02, 0xb3, 0x01, 0xc1, 0x1c, 0xe0, 0x86, 0xb7, 0xe7, 0xf9, + 0xfb, 0xde, 0x15, 0x07, 0xbb, 0x36, 0x35, 0xc1, 0xa2, 0xb1, 0x94, 0x2f, 0x9f, 0x6d, 0xb7, 0x0a, + 0xb3, 0xdb, 0xbd, 0x04, 0x50, 0x6f, 0xbd, 0xe2, 0xbb, 0xd9, 0xf4, 0x2d, 0x20, 0xdd, 0x45, 0xc0, + 0xf7, 0xc5, 0xd3, 0x8b, 0xd8, 0x50, 0xd3, 0xe0, 0xab, 0xf5, 0xe6, 0x90, 0x92, 0x49, 0xb5, 0x01, + 0x49, 0x27, 0xa7, 0x48, 0x14, 0x69, 0x7e, 0xc0, 0x1f, 0x1b, 0xe0, 0xa4, 0x55, 0xad, 0xe2, 0x20, + 0xc4, 0xb6, 0x28, 0xee, 0x99, 0xcf, 0xa0, 0x7e, 0xcd, 0x4a, 0xaf, 0x4e, 0xae, 0xe8, 0xd0, 0xa8, + 0xd3, 0x13, 0xf8, 0x1c, 0x38, 0x45, 0x43, 0x9f, 0x60, 0x3b, 0xd5, 0x36, 0xc3, 0x76, 0xab, 0x70, + 0xaa, 0xd2, 0xc1, 0x41, 0x29, 0xc9, 0xe2, 0x5f, 0x73, 0xa0, 0x70, 0x97, 0xad, 0x76, 0x0f, 0x17, + 0xb3, 0x8b, 0x60, 0x94, 0x3f, 0xae, 0xcd, 0xa3, 0x92, 0xd7, 0x5a, 0x41, 0x4e, 0x45, 0x92, 0xcb, + 0x0e, 0x0a, 0x86, 0xcf, 0xda, 0x97, 0x2c, 0x17, 0x54, 0x07, 0x45, 0x45, 0x90, 0x51, 0xcc, 0x87, + 0x97, 0x01, 0xb0, 
0x71, 0x40, 0x30, 0x3b, 0xac, 0x6c, 0x73, 0x8c, 0x4b, 0xab, 0x45, 0x5a, 0x53, + 0x1c, 0xa4, 0x49, 0xc1, 0x2b, 0x00, 0xc6, 0xff, 0x1c, 0xdf, 0x7b, 0xd9, 0x22, 0x9e, 0xe3, 0xd5, + 0xcd, 0x3c, 0x77, 0x7b, 0x8e, 0x75, 0x63, 0x6b, 0x5d, 0x5c, 0xd4, 0x43, 0x03, 0xbe, 0x0d, 0x46, + 0xc5, 0xd0, 0x87, 0x9f, 0x10, 0x43, 0xac, 0xf2, 0x80, 0xc7, 0x88, 0x43, 0x21, 0x09, 0xd9, 0x5d, + 0xdd, 0x73, 0xf7, 0xbb, 0xba, 0xdf, 0xb1, 0x9c, 0x8e, 0xfe, 0x87, 0x97, 0xd3, 0xe2, 0xbf, 0x8c, + 0x74, 0xcd, 0xd1, 0x1e, 0xb5, 0x52, 0xb5, 0x5c, 0x0c, 0xd7, 0xc0, 0x34, 0xbb, 0x31, 0x21, 0x1c, + 0xb8, 0x4e, 0xd5, 0xa2, 0xfc, 0xc2, 0x2e, 0x92, 0x5d, 0xcd, 0x90, 0x2a, 0x29, 0x3e, 0xea, 0xd2, + 0x80, 0x2f, 0x02, 0x28, 0x6e, 0x11, 0x1d, 0x76, 0x44, 0x43, 0xa4, 0xee, 0x03, 0x95, 0x2e, 0x09, + 0xd4, 0x43, 0x0b, 0xae, 0x82, 0x19, 0xd7, 0xda, 0xc5, 0x6e, 0x05, 0xbb, 0xb8, 0x1a, 0xfa, 0x84, + 0x9b, 0x12, 0x23, 0x8d, 0xd9, 0x76, 0xab, 0x30, 0x73, 0x35, 0xcd, 0x44, 0xdd, 0xf2, 0xc5, 0xf3, + 0xe9, 0xad, 0xad, 0x3f, 0xb8, 0xb8, 0x9b, 0x7d, 0x90, 0x01, 0xf3, 0xfd, 0x33, 0x03, 0x7e, 0x33, + 0xb9, 0x42, 0x8a, 0x1b, 0xc2, 0xeb, 0xc3, 0xca, 0x42, 0x79, 0x87, 0x04, 0xdd, 0xf7, 0x47, 0xf8, + 0x35, 0xd6, 0xae, 0x59, 0x6e, 0x3c, 0xb4, 0x7a, 0x6d, 0x68, 0x2e, 0x30, 0x90, 0xf2, 0xb8, 0xe8, + 0x04, 0x2d, 0x97, 0x37, 0x7e, 0x96, 0x8b, 0x8b, 0xbf, 0x32, 0xd2, 0x53, 0x84, 0x64, 0x07, 0xc3, + 0xef, 0x1b, 0x60, 0xca, 0x0f, 0xb0, 0xb7, 0xb2, 0xbd, 0x71, 0xf3, 0x7f, 0xc4, 0x4e, 0x96, 0xa1, + 0xba, 0x76, 0x4c, 0x3f, 0x5f, 0xac, 0x6c, 0x5d, 0x13, 0x06, 0xb7, 0x89, 0x1f, 0xd0, 0xf2, 0xe9, + 0x76, 0xab, 0x30, 0xb5, 0xd5, 0x09, 0x85, 0xd2, 0xd8, 0xc5, 0x06, 0x98, 0x5d, 0x3f, 0x08, 0x31, + 0xf1, 0x2c, 0x77, 0xcd, 0xaf, 0x46, 0x0d, 0xec, 0x85, 0xc2, 0xd1, 0xd4, 0xc4, 0xcb, 0xb8, 0xc7, + 0x89, 0xd7, 0x43, 0x20, 0x1b, 0x11, 0x57, 0x66, 0xf1, 0x84, 0x9a, 0xe8, 0xa2, 0xab, 0x88, 0xd1, + 0x8b, 0xe7, 0xc1, 0x08, 0xf3, 0x13, 0x9e, 0x05, 0x59, 0x62, 0xed, 0x73, 0xab, 0x93, 0xe5, 0x31, + 0x26, 0x82, 0xac, 0x7d, 0xc4, 0x68, 0xc5, 0x3f, 0x9f, 0x07, 0x53, 0xa9, 0x67, 0x81, 0xf3, 0x20, + 0xa3, 0xc6, 0xc4, 0x40, 0x1a, 0xcd, 0x6c, 0xac, 0xa1, 0x8c, 0x63, 0xc3, 0x67, 0x54, 0xf1, 0x15, + 0xa0, 0x05, 0x75, 0x96, 0x70, 0x2a, 0xeb, 0xcf, 0x13, 0x73, 0xcc, 0x91, 0xb8, 0x70, 0x32, 0x1f, + 0x70, 0x4d, 0xee, 0x12, 0xe1, 0x03, 0xae, 0x21, 0x46, 0xfb, 0xb4, 0xe3, 0xbe, 0x78, 0xde, 0x98, + 0xbb, 0x87, 0x79, 0xe3, 0xe8, 0x1d, 0xe7, 0x8d, 0x17, 0x40, 0x2e, 0x74, 0x42, 0x17, 0xf3, 0x83, + 0x4c, 0xbb, 0x46, 0x5d, 0x67, 0x44, 0x24, 0x78, 0xf0, 0x2d, 0x30, 0x66, 0xe3, 0x9a, 0x15, 0xb9, + 0x21, 0x3f, 0xb3, 0x26, 0x2e, 0xaf, 0x0e, 0x20, 0x85, 0xc4, 0x30, 0x78, 0x4d, 0xd8, 0x45, 0x31, + 0x00, 0x7c, 0x18, 0x8c, 0x35, 0xac, 0x03, 0xa7, 0x11, 0x35, 0x78, 0x83, 0x69, 0x08, 0xb1, 0x4d, + 0x41, 0x42, 0x31, 0x8f, 0x55, 0x46, 0x7c, 0x50, 0x75, 0x23, 0xea, 0x34, 0xb1, 0x64, 0xca, 0xe6, + 0x4f, 0x55, 0xc6, 0xf5, 0x14, 0x1f, 0x75, 0x69, 0x70, 0x30, 0xc7, 0xe3, 0xca, 0x13, 0x1a, 0x98, + 0x20, 0xa1, 0x98, 0xd7, 0x09, 0x26, 0xe5, 0x27, 0xfb, 0x81, 0x49, 0xe5, 0x2e, 0x0d, 0xf8, 0x18, + 0x18, 0x6f, 0x58, 0x07, 0x57, 0xb1, 0x57, 0x0f, 0x6f, 0x99, 0x27, 0x17, 0x8d, 0xa5, 0x6c, 0xf9, + 0x64, 0xbb, 0x55, 0x18, 0xdf, 0x8c, 0x89, 0x28, 0xe1, 0x73, 0x61, 0xc7, 0x93, 0xc2, 0xa7, 0x34, + 0xe1, 0x98, 0x88, 0x12, 0x3e, 0xeb, 0x5e, 0x02, 0x2b, 0x64, 0x9b, 0xcb, 0x9c, 0xea, 0xbc, 0xe6, + 0x6e, 0x0b, 0x32, 0x8a, 0xf9, 0x70, 0x09, 0xe4, 0x1b, 0xd6, 0x01, 0x1f, 0x49, 0x98, 0xd3, 0xdc, + 0x2c, 0x1f, 0x8c, 0x6f, 0x4a, 0x1a, 0x52, 0x5c, 0x2e, 0xe9, 0x78, 0x42, 0x72, 0x46, 0x93, 0x94, + 0x34, 0xa4, 0xb8, 0x2c, 0x89, 0x23, 0xcf, 
0xb9, 0x1d, 0x61, 0x21, 0x0c, 0x79, 0x64, 0x54, 0x12, + 0xdf, 0x48, 0x58, 0x48, 0x97, 0x83, 0x25, 0x00, 0x1a, 0x91, 0x1b, 0x3a, 0x81, 0x8b, 0xb7, 0x6a, + 0xe6, 0x69, 0x1e, 0x7f, 0xde, 0xf4, 0x6f, 0x2a, 0x2a, 0xd2, 0x24, 0x20, 0x06, 0x23, 0xd8, 0x8b, + 0x1a, 0xe6, 0x19, 0x7e, 0xb0, 0x0f, 0x24, 0x05, 0xd5, 0xce, 0x59, 0xf7, 0xa2, 0x06, 0xe2, 0xe6, + 0xe1, 0x33, 0xe0, 0x64, 0xc3, 0x3a, 0x60, 0xe5, 0x00, 0x93, 0xd0, 0xc1, 0xd4, 0x9c, 0xe5, 0x0f, + 0x3f, 0xc3, 0xba, 0xdd, 0x4d, 0x9d, 0x81, 0x3a, 0xe5, 0xb8, 0xa2, 0xe3, 0x69, 0x8a, 0x73, 0x9a, + 0xa2, 0xce, 0x40, 0x9d, 0x72, 0x2c, 0xd2, 0x04, 0xdf, 0x8e, 0x1c, 0x82, 0x6d, 0xf3, 0x01, 0xde, + 0x20, 0xcb, 0x97, 0x15, 0x82, 0x86, 0x14, 0x17, 0x36, 0xe3, 0xd9, 0x95, 0xc9, 0xb7, 0xe1, 0x8d, + 0xc1, 0x56, 0xf2, 0x2d, 0xb2, 0x42, 0x88, 0x75, 0x28, 0x4e, 0x1a, 0x7d, 0x6a, 0x05, 0x29, 0xc8, + 0x59, 0xae, 0xbb, 0x55, 0x33, 0xcf, 0xf2, 0xd8, 0x0f, 0xfa, 0x04, 0x51, 0x55, 0x67, 0x85, 0x81, + 0x20, 0x81, 0xc5, 0x40, 0x7d, 0x8f, 0xa5, 0xc6, 0xfc, 0x70, 0x41, 0xb7, 0x18, 0x08, 0x12, 0x58, + 0xfc, 0x49, 0xbd, 0xc3, 0xad, 0x9a, 0xf9, 0xe0, 0x90, 0x9f, 0x94, 0x81, 0x20, 0x81, 0x05, 0x1d, + 0x90, 0xf5, 0xfc, 0xd0, 0x3c, 0x37, 0x94, 0xe3, 0x99, 0x1f, 0x38, 0xd7, 0xfc, 0x10, 0x31, 0x0c, + 0xf8, 0x23, 0x03, 0x80, 0x20, 0x49, 0xd1, 0x87, 0x06, 0x32, 0x12, 0x49, 0x41, 0x96, 0x92, 0xdc, + 0x5e, 0xf7, 0x42, 0x72, 0x98, 0x5c, 0x8f, 0xb4, 0x3d, 0xa0, 0x79, 0x01, 0x7f, 0x6e, 0x80, 0x33, + 0x7a, 0x9b, 0xac, 0xdc, 0x5b, 0xe0, 0x11, 0xb9, 0x3e, 0xe8, 0x34, 0x2f, 0xfb, 0xbe, 0x5b, 0x36, + 0xdb, 0xad, 0xc2, 0x99, 0x95, 0x1e, 0xa8, 0xa8, 0xa7, 0x2f, 0xf0, 0xd7, 0x06, 0x98, 0x91, 0x55, + 0x54, 0xf3, 0xb0, 0xc0, 0x03, 0x88, 0x07, 0x1d, 0xc0, 0x34, 0x8e, 0x88, 0xa3, 0x7a, 0xc9, 0xde, + 0xc5, 0x47, 0xdd, 0xae, 0xc1, 0xdf, 0x19, 0x60, 0xd2, 0xc6, 0x01, 0xf6, 0x6c, 0xec, 0x55, 0x99, + 0xaf, 0x8b, 0x03, 0x19, 0x59, 0xa4, 0x7d, 0x5d, 0xd3, 0x20, 0x84, 0x9b, 0x25, 0xe9, 0xe6, 0xa4, + 0xce, 0x3a, 0x6a, 0x15, 0xe6, 0x12, 0x55, 0x9d, 0x83, 0x3a, 0xbc, 0x84, 0xef, 0x19, 0x60, 0x2a, + 0x59, 0x00, 0x71, 0xa4, 0x9c, 0x1f, 0x62, 0x1e, 0xf0, 0xf6, 0x75, 0xa5, 0x13, 0x10, 0xa5, 0x3d, + 0x80, 0xbf, 0x31, 0x58, 0xa7, 0x16, 0xdf, 0xfb, 0xa8, 0x59, 0xe4, 0xb1, 0x7c, 0x63, 0xe0, 0xb1, + 0x54, 0x08, 0x22, 0x94, 0x97, 0x92, 0x56, 0x50, 0x71, 0x8e, 0x5a, 0x85, 0x59, 0x3d, 0x92, 0x8a, + 0x81, 0x74, 0x0f, 0xe1, 0x77, 0x0d, 0x30, 0x89, 0x93, 0x8e, 0x9b, 0x9a, 0x17, 0x06, 0x12, 0xc4, + 0x9e, 0x4d, 0xbc, 0xb8, 0xa9, 0x6b, 0x2c, 0x8a, 0x3a, 0xb0, 0x59, 0x07, 0x89, 0x0f, 0xac, 0x46, + 0xe0, 0x62, 0xf3, 0xbf, 0x06, 0xdc, 0x41, 0xae, 0x0b, 0xbb, 0x28, 0x06, 0x80, 0x97, 0x40, 0xde, + 0x8b, 0x5c, 0xd7, 0xda, 0x75, 0xb1, 0xf9, 0x30, 0xef, 0x45, 0xd4, 0x48, 0xf6, 0x9a, 0xa4, 0x23, + 0x25, 0x01, 0x6b, 0x60, 0xf1, 0xe0, 0x25, 0xf5, 0x79, 0x52, 0xcf, 0xa1, 0xa1, 0x79, 0x91, 0x5b, + 0x99, 0x6f, 0xb7, 0x0a, 0x73, 0x3b, 0xbd, 0xc7, 0x8a, 0x77, 0xb5, 0x01, 0x5f, 0x01, 0x0f, 0x6a, + 0x32, 0xeb, 0x8d, 0x5d, 0x6c, 0xdb, 0xd8, 0x8e, 0x2f, 0x6e, 0xe6, 0x7f, 0x8b, 0xc1, 0x65, 0xbc, + 0xc1, 0x77, 0xd2, 0x02, 0xe8, 0x4e, 0xda, 0xf0, 0x2a, 0x98, 0xd3, 0xd8, 0x1b, 0x5e, 0xb8, 0x45, + 0x2a, 0x21, 0x71, 0xbc, 0xba, 0xb9, 0xc4, 0xed, 0x9e, 0x89, 0x77, 0xe4, 0x8e, 0xc6, 0x43, 0x7d, + 0x74, 0xe0, 0x17, 0x3a, 0xac, 0xf1, 0x57, 0x68, 0x56, 0xf0, 0x12, 0x3e, 0xa4, 0xe6, 0x23, 0xbc, + 0x3b, 0xe1, 0x8b, 0xbd, 0xa3, 0xd1, 0x51, 0x1f, 0x79, 0xf8, 0x79, 0x70, 0x3a, 0xc5, 0x61, 0x57, + 0x14, 0xf3, 0x51, 0x71, 0xd7, 0x60, 0xfd, 0xec, 0x4e, 0x4c, 0x44, 0xbd, 0x24, 0xe1, 0xe7, 0x00, + 0xd4, 0xc8, 0x9b, 0x56, 0xc0, 0xf5, 0x1f, 0x13, 0xd7, 0x1e, 0xb6, 
0xa2, 0x3b, 0x92, 0x86, 0x7a, + 0xc8, 0xc1, 0x9f, 0x18, 0x1d, 0x4f, 0x92, 0xdc, 0x8e, 0xa9, 0x79, 0x89, 0xef, 0xdf, 0xcd, 0x63, + 0x66, 0xa1, 0xf6, 0x1e, 0x24, 0x72, 0xb1, 0x16, 0x66, 0x0d, 0x0a, 0xf5, 0x71, 0x61, 0x9e, 0xdd, + 0xd0, 0x53, 0x15, 0x1e, 0x4e, 0x83, 0xec, 0x1e, 0x96, 0x5f, 0x55, 0x20, 0xf6, 0x13, 0xda, 0x20, + 0xd7, 0xb4, 0xdc, 0x28, 0x1e, 0x32, 0x0c, 0xb8, 0x3b, 0x40, 0xc2, 0xf8, 0x73, 0x99, 0x67, 0x8d, + 0xf9, 0xf7, 0x0d, 0x30, 0xd7, 0xfb, 0xe0, 0xb9, 0xaf, 0x6e, 0xfd, 0xd4, 0x00, 0x33, 0x5d, 0x67, + 0x4c, 0x0f, 0x8f, 0x6e, 0x77, 0x7a, 0xf4, 0xca, 0xa0, 0x0f, 0x0b, 0xb1, 0x39, 0x78, 0x87, 0xac, + 0xbb, 0xf7, 0x03, 0x03, 0x4c, 0xa7, 0xcb, 0xf6, 0xfd, 0x8c, 0x57, 0xf1, 0xfd, 0x0c, 0x98, 0xeb, + 0xdd, 0xd8, 0x43, 0xa2, 0x26, 0x18, 0xc3, 0x99, 0x04, 0xf5, 0x9a, 0x1a, 0xbf, 0x63, 0x80, 0x89, + 0xb7, 0x94, 0x5c, 0xfc, 0xd6, 0x7d, 0xe0, 0x33, 0xa8, 0xf8, 0x9c, 0x4c, 0x18, 0x14, 0xe9, 0xb8, + 0xc5, 0xdf, 0x1a, 0x60, 0xb6, 0x67, 0x03, 0x00, 0x2f, 0x82, 0x51, 0xcb, 0x75, 0xfd, 0x7d, 0x31, + 0x4a, 0xd4, 0xde, 0x11, 0xac, 0x70, 0x2a, 0x92, 0x5c, 0x2d, 0x7a, 0x99, 0xcf, 0x2a, 0x7a, 0xc5, + 0x3f, 0x18, 0xe0, 0xdc, 0x9d, 0x32, 0xf1, 0xbe, 0x2c, 0xe9, 0x12, 0xc8, 0xcb, 0xe6, 0xfd, 0x90, + 0x2f, 0xa7, 0x2c, 0xc5, 0xb2, 0x68, 0xf0, 0x0f, 0xcd, 0xc4, 0xaf, 0xe2, 0x07, 0x06, 0x98, 0xae, + 0x60, 0xd2, 0x74, 0xaa, 0x18, 0xe1, 0x1a, 0x26, 0xd8, 0xab, 0x62, 0xb8, 0x0c, 0xc6, 0xf9, 0xeb, + 0xee, 0xc0, 0xaa, 0xc6, 0xaf, 0x6e, 0x66, 0x64, 0xc8, 0xc7, 0xaf, 0xc5, 0x0c, 0x94, 0xc8, 0xa8, + 0xd7, 0x3c, 0x99, 0xbe, 0xaf, 0x79, 0xce, 0x81, 0x91, 0x20, 0x19, 0x44, 0xe7, 0x19, 0x97, 0xcf, + 0x9e, 0x39, 0x95, 0x73, 0x7d, 0x12, 0xf2, 0xe9, 0x5a, 0x4e, 0x72, 0x7d, 0x12, 0x22, 0x4e, 0x2d, + 0xfe, 0x29, 0x03, 0x4e, 0x75, 0xd6, 0x71, 0x06, 0x48, 0x22, 0xb7, 0xeb, 0xbd, 0x12, 0xe3, 0x21, + 0xce, 0xd1, 0x3f, 0x77, 0xc9, 0xdc, 0xf9, 0x73, 0x17, 0xf8, 0x02, 0x98, 0x91, 0x3f, 0xd7, 0x0f, + 0x02, 0x82, 0x29, 0x7f, 0x77, 0x9a, 0xed, 0xfc, 0x68, 0x76, 0x33, 0x2d, 0x80, 0xba, 0x75, 0xe0, + 0xff, 0xa7, 0x3e, 0xc5, 0xb9, 0x90, 0x7c, 0x86, 0xc3, 0x5a, 0x42, 0xde, 0x67, 0xdc, 0x64, 0x65, + 0x60, 0x9d, 0x10, 0x9f, 0xa4, 0xbe, 0xcf, 0x59, 0x06, 0xe3, 0x35, 0x26, 0xc0, 0xe7, 0xf5, 0xb9, + 0xce, 0xa0, 0x5f, 0x89, 0x19, 0x28, 0x91, 0x81, 0xcf, 0x83, 0x29, 0x3f, 0x10, 0x1d, 0xf0, 0x96, + 0x6b, 0x57, 0xb0, 0x5b, 0xe3, 0x93, 0xc4, 0x7c, 0x3c, 0xee, 0xed, 0x60, 0xa1, 0xb4, 0x6c, 0xf1, + 0x8f, 0x06, 0xe8, 0xf5, 0xa1, 0x1d, 0x3c, 0x2b, 0xc6, 0xb6, 0xda, 0x2c, 0x34, 0x1e, 0xd9, 0xc2, + 0x26, 0x18, 0xa3, 0x22, 0x57, 0x64, 0x2e, 0x6f, 0x1d, 0x33, 0x97, 0xd3, 0x99, 0x27, 0xfa, 0xc5, + 0x98, 0x1a, 0x83, 0xb1, 0x74, 0xae, 0x5a, 0xe5, 0xc8, 0xb3, 0xe5, 0x24, 0x7f, 0x52, 0xa4, 0xf3, + 0xea, 0x8a, 0xa0, 0x21, 0xc5, 0x2d, 0x57, 0x3f, 0xfc, 0x64, 0xe1, 0xc4, 0x47, 0x9f, 0x2c, 0x9c, + 0xf8, 0xf8, 0x93, 0x85, 0x13, 0x5f, 0x6f, 0x2f, 0x18, 0x1f, 0xb6, 0x17, 0x8c, 0x8f, 0xda, 0x0b, + 0xc6, 0xc7, 0xed, 0x05, 0xe3, 0x6f, 0xed, 0x05, 0xe3, 0x87, 0x7f, 0x5f, 0x38, 0xf1, 0xe5, 0xe7, + 0x8f, 0xf5, 0x6d, 0xfb, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x16, 0x56, 0x59, 0x35, 0x34, 0x2f, + 0x00, 0x00, } func (m *ConversionRequest) Marshal() (dAtA []byte, err error) { @@ -2660,6 +2662,16 @@ func (m *ValidationRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.OptionalOldSelf != nil { + i-- + if *m.OptionalOldSelf { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } i -= len(m.FieldPath) copy(dAtA[i:], m.FieldPath) i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldPath))) @@ -3374,6 +3386,9 @@ func (m 
*ValidationRule) Size() (n int) { } l = len(m.FieldPath) n += 1 + l + sovGenerated(uint64(l)) + if m.OptionalOldSelf != nil { + n += 2 + } return n } @@ -3843,6 +3858,7 @@ func (this *ValidationRule) String() string { `MessageExpression:` + fmt.Sprintf("%v", this.MessageExpression) + `,`, `Reason:` + valueToStringGenerated(this.Reason) + `,`, `FieldPath:` + fmt.Sprintf("%v", this.FieldPath) + `,`, + `OptionalOldSelf:` + valueToStringGenerated(this.OptionalOldSelf) + `,`, `}`, }, "") return s @@ -9166,6 +9182,27 @@ func (m *ValidationRule) Unmarshal(dAtA []byte) error { } m.FieldPath = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OptionalOldSelf", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.OptionalOldSelf = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto index 7cfb9c4dd5..b8477322b7 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto @@ -710,6 +710,18 @@ message ValidationRule { // - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values // are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with // non-intersecting keys are appended, retaining their partial order. + // + // If `rule` makes use of the `oldSelf` variable it is implicitly a + // `transition rule`. + // + // By default, the `oldSelf` variable is the same type as `self`. + // When `optionalOldSelf` is true, the `oldSelf` variable is a CEL optional + // variable whose value() is the same type as `self`. + // See the documentation for the `optionalOldSelf` field for details. + // + // Transition rules by default are applied only on UPDATE requests and are + // skipped if an old value could not be found. You can opt a transition + // rule into unconditional evaluation by setting `optionalOldSelf` to true. optional string rule = 1; // Message represents the message displayed when validation fails. The message is required if the Rule contains @@ -750,6 +762,24 @@ message ValidationRule { // e.g. for attribute `foo.34$` appears in a list `testList`, the fieldPath could be set to `.testList['foo.34$']` // +optional optional string fieldPath = 5; + + // optionalOldSelf is used to opt a transition rule into evaluation + // even when the object is first created, or if the old object is + // missing the value. + // + // When enabled `oldSelf` will be a CEL optional whose value will be + // `None` if there is no old value, or when the object is initially created. + // + // You may check for presence of oldSelf using `oldSelf.hasValue()` and + // unwrap it after checking using `oldSelf.value()`. Check the CEL + // documentation for Optional types for more information: + // https://pkg.go.dev/github.com/google/cel-go/cel#OptionalTypes + // + // May not be set unless `oldSelf` is used in `rule`. 
+ // + // +featureGate=CRDValidationRatcheting + // +optional + optional bool optionalOldSelf = 6; } // WebhookClientConfig contains the information to make a TLS connection with the webhook. diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go index 59a0932276..24c45bb04e 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go @@ -249,6 +249,19 @@ type ValidationRule struct { // - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values // are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with // non-intersecting keys are appended, retaining their partial order. + // + // If `rule` makes use of the `oldSelf` variable it is implicitly a + // `transition rule`. + // + // By default, the `oldSelf` variable is the same type as `self`. + // When `optionalOldSelf` is true, the `oldSelf` variable is a CEL optional + // variable whose value() is the same type as `self`. + // See the documentation for the `optionalOldSelf` field for details. + // + // Transition rules by default are applied only on UPDATE requests and are + // skipped if an old value could not be found. You can opt a transition + // rule into unconditional evaluation by setting `optionalOldSelf` to true. + // Rule string `json:"rule" protobuf:"bytes,1,opt,name=rule"` // Message represents the message displayed when validation fails. The message is required if the Rule contains // line breaks. The message must not contain line breaks. @@ -285,6 +298,24 @@ type ValidationRule struct { // e.g. for attribute `foo.34$` appears in a list `testList`, the fieldPath could be set to `.testList['foo.34$']` // +optional FieldPath string `json:"fieldPath,omitempty" protobuf:"bytes,5,opt,name=fieldPath"` + + // optionalOldSelf is used to opt a transition rule into evaluation + // even when the object is first created, or if the old object is + // missing the value. + // + // When enabled `oldSelf` will be a CEL optional whose value will be + // `None` if there is no old value, or when the object is initially created. + // + // You may check for presence of oldSelf using `oldSelf.hasValue()` and + // unwrap it after checking using `oldSelf.value()`. Check the CEL + // documentation for Optional types for more information: + // https://pkg.go.dev/github.com/google/cel-go/cel#OptionalTypes + // + // May not be set unless `oldSelf` is used in `rule`. + // + // +featureGate=CRDValidationRatcheting + // +optional + OptionalOldSelf *bool `json:"optionalOldSelf,omitempty" protobuf:"bytes,6,opt,name=optionalOldSelf"` } // JSON represents any valid JSON value. 
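Reviewer note on the hunks above: the new optionalOldSelf field changes what a CEL transition rule sees on CREATE. Below is a minimal, illustrative Go sketch — not part of the vendored code; the rule text, message, and feature-gate availability are assumptions — showing how a ValidationRule might be constructed with the new field using the v1 types from this vendor tree.

package main

import (
	"fmt"

	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
)

func main() {
	optional := true
	rule := apiextensionsv1.ValidationRule{
		// Referencing oldSelf makes this a transition rule; with OptionalOldSelf
		// set, oldSelf is a CEL optional that is absent on CREATE, so the
		// expression guards with hasValue() before calling value().
		Rule:            "!oldSelf.hasValue() || self >= oldSelf.value()",
		Message:         "value may not decrease once set",
		OptionalOldSelf: &optional,
	}
	fmt.Printf("%+v\n", rule)
}

The guard mirrors the field documentation above: oldSelf.value() is only safe to call after oldSelf.hasValue() has confirmed an old value exists, and the mechanism is only active when the serving cluster enables the CRDValidationRatcheting feature gate.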
diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go index 65c023f06a..fa6e0ef24e 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go @@ -1309,6 +1309,7 @@ func autoConvert_v1beta1_ValidationRule_To_apiextensions_ValidationRule(in *Vali out.MessageExpression = in.MessageExpression out.Reason = (*apiextensions.FieldValueErrorReason)(unsafe.Pointer(in.Reason)) out.FieldPath = in.FieldPath + out.OptionalOldSelf = (*bool)(unsafe.Pointer(in.OptionalOldSelf)) return nil } @@ -1323,6 +1324,7 @@ func autoConvert_apiextensions_ValidationRule_To_v1beta1_ValidationRule(in *apie out.MessageExpression = in.MessageExpression out.Reason = (*FieldValueErrorReason)(unsafe.Pointer(in.Reason)) out.FieldPath = in.FieldPath + out.OptionalOldSelf = (*bool)(unsafe.Pointer(in.OptionalOldSelf)) return nil } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go index 9eeef6ca46..bb8ab06cb9 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go @@ -644,6 +644,11 @@ func (in *ValidationRule) DeepCopyInto(out *ValidationRule) { *out = new(FieldValueErrorReason) **out = **in } + if in.OptionalOldSelf != nil { + in, out := &in.OptionalOldSelf, &out.OptionalOldSelf + *out = new(bool) + **out = **in + } return } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go index f8a5ffbfbb..b5e5c35c55 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go @@ -536,6 +536,11 @@ func (in *ValidationRule) DeepCopyInto(out *ValidationRule) { *out = new(FieldValueErrorReason) **out = **in } + if in.OptionalOldSelf != nil { + in, out := &in.OptionalOldSelf, &out.OptionalOldSelf + *out = new(bool) + **out = **in + } return } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/validationrule.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/validationrule.go index 1d38873dae..c0eb0b51bd 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/validationrule.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/validationrule.go @@ -30,6 +30,7 @@ type ValidationRuleApplyConfiguration struct { MessageExpression *string `json:"messageExpression,omitempty"` Reason *v1.FieldValueErrorReason `json:"reason,omitempty"` FieldPath *string `json:"fieldPath,omitempty"` + OptionalOldSelf *bool `json:"optionalOldSelf,omitempty"` } // ValidationRuleApplyConfiguration constructs an declarative configuration of the ValidationRule type for use with @@ -77,3 +78,11 @@ func (b *ValidationRuleApplyConfiguration) WithFieldPath(value string) *Validati b.FieldPath = &value return b } + +// WithOptionalOldSelf sets the 
OptionalOldSelf field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OptionalOldSelf field is set to the value of the last call. +func (b *ValidationRuleApplyConfiguration) WithOptionalOldSelf(value bool) *ValidationRuleApplyConfiguration { + b.OptionalOldSelf = &value + return b +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/validationrule.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/validationrule.go index e52c203034..1b0df078b5 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/validationrule.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/validationrule.go @@ -30,6 +30,7 @@ type ValidationRuleApplyConfiguration struct { MessageExpression *string `json:"messageExpression,omitempty"` Reason *v1beta1.FieldValueErrorReason `json:"reason,omitempty"` FieldPath *string `json:"fieldPath,omitempty"` + OptionalOldSelf *bool `json:"optionalOldSelf,omitempty"` } // ValidationRuleApplyConfiguration constructs an declarative configuration of the ValidationRule type for use with @@ -77,3 +78,11 @@ func (b *ValidationRuleApplyConfiguration) WithFieldPath(value string) *Validati b.FieldPath = &value return b } + +// WithOptionalOldSelf sets the OptionalOldSelf field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OptionalOldSelf field is set to the value of the last call. +func (b *ValidationRuleApplyConfiguration) WithOptionalOldSelf(value bool) *ValidationRuleApplyConfiguration { + b.OptionalOldSelf = &value + return b +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go b/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go index 60c8209de0..cbdf2eeb83 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go @@ -22,14 +22,15 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// SetStatusCondition sets the corresponding condition in conditions to newCondition. +// SetStatusCondition sets the corresponding condition in conditions to newCondition and returns true +// if the conditions are changed by this call. // conditions must be non-nil. // 1. if the condition of the specified type already exists (all fields of the existing condition are updated to // newCondition, LastTransitionTime is set to now if the new status differs from the old status) // 2. 
if a condition of the specified type does not exist (LastTransitionTime is set to now() if unset, and newCondition is appended) -func SetStatusCondition(conditions *[]metav1.Condition, newCondition metav1.Condition) { +func SetStatusCondition(conditions *[]metav1.Condition, newCondition metav1.Condition) (changed bool) { if conditions == nil { - return + return false } existingCondition := FindStatusCondition(*conditions, newCondition.Type) if existingCondition == nil { @@ -37,7 +38,7 @@ func SetStatusCondition(conditions *[]metav1.Condition, newCondition metav1.Cond newCondition.LastTransitionTime = metav1.NewTime(time.Now()) } *conditions = append(*conditions, newCondition) - return + return true } if existingCondition.Status != newCondition.Status { @@ -47,18 +48,31 @@ func SetStatusCondition(conditions *[]metav1.Condition, newCondition metav1.Cond } else { existingCondition.LastTransitionTime = metav1.NewTime(time.Now()) } + changed = true } - existingCondition.Reason = newCondition.Reason - existingCondition.Message = newCondition.Message - existingCondition.ObservedGeneration = newCondition.ObservedGeneration + if existingCondition.Reason != newCondition.Reason { + existingCondition.Reason = newCondition.Reason + changed = true + } + if existingCondition.Message != newCondition.Message { + existingCondition.Message = newCondition.Message + changed = true + } + if existingCondition.ObservedGeneration != newCondition.ObservedGeneration { + existingCondition.ObservedGeneration = newCondition.ObservedGeneration + changed = true + } + + return changed } -// RemoveStatusCondition removes the corresponding conditionType from conditions. +// RemoveStatusCondition removes the corresponding conditionType from conditions if present. Returns +// true if it was present and got removed. // conditions must be non-nil. -func RemoveStatusCondition(conditions *[]metav1.Condition, conditionType string) { +func RemoveStatusCondition(conditions *[]metav1.Condition, conditionType string) (removed bool) { if conditions == nil || len(*conditions) == 0 { - return + return false } newConditions := make([]metav1.Condition, 0, len(*conditions)-1) for _, condition := range *conditions { @@ -67,7 +81,10 @@ func RemoveStatusCondition(conditions *[]metav1.Condition, conditionType string) } } + removed = len(*conditions) != len(newConditions) *conditions = newConditions + + return removed } // FindStatusCondition finds the conditionType in conditions. diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go b/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go index a8866a43e1..2eebec667d 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go @@ -203,6 +203,44 @@ func (a *int64Amount) Sub(b int64Amount) bool { return a.Add(int64Amount{value: -b.value, scale: b.scale}) } +// Mul multiplies the provided b to the current amount, or +// returns false if overflow or underflow would result. 
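With the change above, meta.SetStatusCondition and meta.RemoveStatusCondition report whether they actually mutated the conditions slice, so a controller can skip a status update when nothing changed. An illustrative call pattern (the condition values are made up):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	var conditions []metav1.Condition

	changed := meta.SetStatusCondition(&conditions, metav1.Condition{
		Type:   "Ready",
		Status: metav1.ConditionTrue,
		Reason: "Reconciled",
	})
	fmt.Println("changed:", changed) // true on first call

	// Setting the identical condition again reports no change, so a
	// redundant status update can be avoided.
	changed = meta.SetStatusCondition(&conditions, metav1.Condition{
		Type:   "Ready",
		Status: metav1.ConditionTrue,
		Reason: "Reconciled",
	})
	fmt.Println("changed:", changed) // false

	removed := meta.RemoveStatusCondition(&conditions, "Ready")
	fmt.Println("removed:", removed) // true
}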
+func (a *int64Amount) Mul(b int64) bool { + switch { + case a.value == 0: + return true + case b == 0: + a.value = 0 + a.scale = 0 + return true + case a.scale == 0: + c, ok := int64Multiply(a.value, b) + if !ok { + return false + } + a.value = c + case a.scale > 0: + c, ok := int64Multiply(a.value, b) + if !ok { + return false + } + if _, ok = positiveScaleInt64(c, a.scale); !ok { + return false + } + a.value = c + default: + c, ok := int64Multiply(a.value, b) + if !ok { + return false + } + if _, ok = negativeScaleInt64(c, -a.scale); !ok { + return false + } + a.value = c + } + return true +} + // AsScale adjusts this amount to set a minimum scale, rounding up, and returns true iff no precision // was lost. (1.1e5).AsScale(5) would return 1.1e5, but (1.1e5).AsScale(6) would return 1e6. func (a int64Amount) AsScale(scale Scale) (int64Amount, bool) { diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go index b47d554b3c..69f1bc336d 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go @@ -592,6 +592,16 @@ func (q *Quantity) Sub(y Quantity) { q.ToDec().d.Dec.Sub(q.d.Dec, y.AsDec()) } +// Mul multiplies the provided y to the current value. +// It will return false if the result is inexact. Otherwise, it will return true. +func (q *Quantity) Mul(y int64) bool { + q.s = "" + if q.d.Dec == nil && q.i.Mul(y) { + return true + } + return q.ToDec().d.Dec.Mul(q.d.Dec, inf.NewDec(y, inf.Scale(0))).UnscaledBig().IsInt64() +} + // Cmp returns 0 if the quantity is equal to y, -1 if the quantity is less than y, or 1 if the // quantity is greater than y. func (q *Quantity) Cmp(y Quantity) int { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go index 2e33283ef2..0f58d66c09 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go @@ -173,7 +173,7 @@ func NestedStringMap(obj map[string]interface{}, fields ...string) (map[string]s if str, ok := v.(string); ok { strMap[k] = str } else { - return nil, false, fmt.Errorf("%v accessor error: contains non-string key in the map: %v is of the type %T, expected string", jsonPath(fields), v, v) + return nil, false, fmt.Errorf("%v accessor error: contains non-string value in the map under key %q: %v is of the type %T, expected string", jsonPath(fields), k, v, v) } } return strMap, true, nil diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/helper.go b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go index 7bd1a3a6a5..f46a24cc6c 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/helper.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go @@ -257,3 +257,26 @@ func (d WithoutVersionDecoder) Decode(data []byte, defaults *schema.GroupVersion } return obj, gvk, err } + +type encoderWithAllocator struct { + encoder EncoderWithAllocator + memAllocator MemoryAllocator +} + +// NewEncoderWithAllocator returns a new encoder +func NewEncoderWithAllocator(e EncoderWithAllocator, a MemoryAllocator) Encoder { + return &encoderWithAllocator{ + encoder: e, + memAllocator: a, + } +} + +// Encode writes the provided object to the nested writer +func (e *encoderWithAllocator) Encode(obj Object, w io.Writer) error { + return e.encoder.EncodeWithAllocator(obj, w, e.memAllocator) +} + +// Identifier returns identifier of this encoder. 
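The Quantity.Mul method added above scales a quantity in place by an integer factor and returns false when the result can no longer be kept on the fast int64 path. A small sketch:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	q := resource.MustParse("500Mi")
	ok := q.Mul(3)
	fmt.Println(q.String(), ok) // 1500Mi true

	// 4Ei * 4 overflows int64, so the value falls back to the inf.Dec
	// representation and Mul reports false.
	big := resource.MustParse("4Ei")
	ok = big.Mul(4)
	fmt.Println(big.String(), ok)
}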
+func (e *encoderWithAllocator) Identifier() Identifier { + return e.encoder.Identifier() +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go index 87b3fec3f2..971c46d496 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go @@ -134,23 +134,3 @@ func (e *encoder) Encode(obj runtime.Object) error { e.buf.Reset() return err } - -type encoderWithAllocator struct { - writer io.Writer - encoder runtime.EncoderWithAllocator - memAllocator runtime.MemoryAllocator -} - -// NewEncoderWithAllocator returns a new streaming encoder -func NewEncoderWithAllocator(w io.Writer, e runtime.EncoderWithAllocator, a runtime.MemoryAllocator) Encoder { - return &encoderWithAllocator{ - writer: w, - encoder: e, - memAllocator: a, - } -} - -// Encode writes the provided object to the nested writer -func (e *encoderWithAllocator) Encode(obj runtime.Object) error { - return e.encoder.EncodeWithAllocator(obj, e.writer, e.memAllocator) -} diff --git a/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go b/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go index 1328dd6120..ad486d580f 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go +++ b/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go @@ -136,6 +136,19 @@ func (c *LRUExpireCache) Remove(key interface{}) { delete(c.entries, key) } +// RemoveAll removes all keys that match predicate. +func (c *LRUExpireCache) RemoveAll(predicate func(key any) bool) { + c.lock.Lock() + defer c.lock.Unlock() + + for key, element := range c.entries { + if predicate(key) { + c.evictionList.Remove(element) + delete(c.entries, key) + } + } +} + // Keys returns all unexpired keys in the cache. // // Keep in mind that subsequent calls to Get() for any of the returned keys diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go index 32f075782a..a32fce5a0c 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go +++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go @@ -17,6 +17,7 @@ limitations under the License. package httpstream import ( + "errors" "fmt" "io" "net/http" @@ -95,6 +96,26 @@ type Stream interface { Identifier() uint32 } +// UpgradeFailureError encapsulates the cause for why the streaming +// upgrade request failed. Implements error interface. +type UpgradeFailureError struct { + Cause error +} + +func (u *UpgradeFailureError) Error() string { + return fmt.Sprintf("unable to upgrade streaming request: %s", u.Cause) +} + +// IsUpgradeFailure returns true if the passed error is (or wrapped error contains) +// the UpgradeFailureError. 
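Two smaller additions in the hunks above: LRUExpireCache gains RemoveAll for predicate-based invalidation, and httpstream gains UpgradeFailureError with IsUpgradeFailure for classifying failed upgrade negotiations. A sketch of the cache helper (the keys are illustrative):

package main

import (
	"fmt"
	"strings"
	"time"

	"k8s.io/apimachinery/pkg/util/cache"
)

func main() {
	c := cache.NewLRUExpireCache(16)
	c.Add("pods/default/a", 1, time.Minute)
	c.Add("pods/kube-system/b", 2, time.Minute)
	c.Add("nodes/worker-1", 3, time.Minute)

	// Drop every entry in the "pods/" keyspace in a single locked pass.
	c.RemoveAll(func(key any) bool {
		k, ok := key.(string)
		return ok && strings.HasPrefix(k, "pods/")
	})

	fmt.Println(c.Keys()) // only the nodes/ key remains
}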
+func IsUpgradeFailure(err error) bool { + if err == nil { + return false + } + var upgradeErr *UpgradeFailureError + return errors.As(err, &upgradeErr) +} + // IsUpgradeRequest returns true if the given request is a connection upgrade request func IsUpgradeRequest(req *http.Request) bool { for _, h := range req.Header[http.CanonicalHeaderKey(HeaderConnection)] { diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go index 7fe52ee568..c78326fa3b 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go +++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go @@ -38,6 +38,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apimachinery/pkg/util/httpstream" utilnet "k8s.io/apimachinery/pkg/util/net" + apiproxy "k8s.io/apimachinery/pkg/util/proxy" "k8s.io/apimachinery/third_party/forked/golang/netutil" ) @@ -68,6 +69,10 @@ type SpdyRoundTripper struct { // pingPeriod is a period for sending Ping frames over established // connections. pingPeriod time.Duration + + // upgradeTransport is an optional substitute for dialing if present. This field is + // mutually exclusive with the "tlsConfig", "Dialer", and "proxier". + upgradeTransport http.RoundTripper } var _ utilnet.TLSClientConfigHolder = &SpdyRoundTripper{} @@ -76,43 +81,61 @@ var _ utilnet.Dialer = &SpdyRoundTripper{} // NewRoundTripper creates a new SpdyRoundTripper that will use the specified // tlsConfig. -func NewRoundTripper(tlsConfig *tls.Config) *SpdyRoundTripper { +func NewRoundTripper(tlsConfig *tls.Config) (*SpdyRoundTripper, error) { return NewRoundTripperWithConfig(RoundTripperConfig{ - TLS: tlsConfig, + TLS: tlsConfig, + UpgradeTransport: nil, }) } // NewRoundTripperWithProxy creates a new SpdyRoundTripper that will use the // specified tlsConfig and proxy func. -func NewRoundTripperWithProxy(tlsConfig *tls.Config, proxier func(*http.Request) (*url.URL, error)) *SpdyRoundTripper { +func NewRoundTripperWithProxy(tlsConfig *tls.Config, proxier func(*http.Request) (*url.URL, error)) (*SpdyRoundTripper, error) { return NewRoundTripperWithConfig(RoundTripperConfig{ - TLS: tlsConfig, - Proxier: proxier, + TLS: tlsConfig, + Proxier: proxier, + UpgradeTransport: nil, }) } // NewRoundTripperWithConfig creates a new SpdyRoundTripper with the specified -// configuration. -func NewRoundTripperWithConfig(cfg RoundTripperConfig) *SpdyRoundTripper { +// configuration. Returns an error if the SpdyRoundTripper is misconfigured. +func NewRoundTripperWithConfig(cfg RoundTripperConfig) (*SpdyRoundTripper, error) { + // Process UpgradeTransport, which is mutually exclusive to TLSConfig and Proxier. + if cfg.UpgradeTransport != nil { + if cfg.TLS != nil || cfg.Proxier != nil { + return nil, fmt.Errorf("SpdyRoundTripper: UpgradeTransport is mutually exclusive to TLSConfig or Proxier") + } + tlsConfig, err := utilnet.TLSClientConfig(cfg.UpgradeTransport) + if err != nil { + return nil, fmt.Errorf("SpdyRoundTripper: Unable to retrieve TLSConfig from UpgradeTransport: %v", err) + } + cfg.TLS = tlsConfig + } if cfg.Proxier == nil { cfg.Proxier = utilnet.NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment) } return &SpdyRoundTripper{ - tlsConfig: cfg.TLS, - proxier: cfg.Proxier, - pingPeriod: cfg.PingPeriod, - } + tlsConfig: cfg.TLS, + proxier: cfg.Proxier, + pingPeriod: cfg.PingPeriod, + upgradeTransport: cfg.UpgradeTransport, + }, nil } // RoundTripperConfig is a set of options for an SpdyRoundTripper. 
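The SPDY round tripper constructors now return an error, so an UpgradeTransport combined with TLS or Proxier is rejected up front instead of being silently ignored. Callers migrating from the old signatures handle it roughly like this sketch:

package main

import (
	"crypto/tls"
	"log"
	"net/http"

	"k8s.io/apimachinery/pkg/util/httpstream/spdy"
)

func main() {
	rt, err := spdy.NewRoundTripper(&tls.Config{})
	if err != nil {
		log.Fatalf("building SPDY round tripper: %v", err)
	}
	_ = rt

	// Mutually exclusive options are now caught at construction time.
	_, err = spdy.NewRoundTripperWithConfig(spdy.RoundTripperConfig{
		TLS:              &tls.Config{},
		UpgradeTransport: http.DefaultTransport,
	})
	log.Println("expected configuration error:", err)
}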
type RoundTripperConfig struct { - // TLS configuration used by the round tripper. + // TLS configuration used by the round tripper if UpgradeTransport not present. TLS *tls.Config // Proxier is a proxy function invoked on each request. Optional. Proxier func(*http.Request) (*url.URL, error) // PingPeriod is a period for sending SPDY Pings on the connection. // Optional. PingPeriod time.Duration + // UpgradeTransport is a subtitute transport used for dialing. If set, + // this field will be used instead of "TLS" and "Proxier" for connection creation. + // Optional. + UpgradeTransport http.RoundTripper } // TLSClientConfig implements pkg/util/net.TLSClientConfigHolder for proper TLS checking during @@ -123,7 +146,13 @@ func (s *SpdyRoundTripper) TLSClientConfig() *tls.Config { // Dial implements k8s.io/apimachinery/pkg/util/net.Dialer. func (s *SpdyRoundTripper) Dial(req *http.Request) (net.Conn, error) { - conn, err := s.dial(req) + var conn net.Conn + var err error + if s.upgradeTransport != nil { + conn, err = apiproxy.DialURL(req.Context(), req.URL, s.upgradeTransport) + } else { + conn, err = s.dial(req) + } if err != nil { return nil, err } diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go index 0ea88156be..f358c794d1 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go @@ -72,14 +72,14 @@ func FromString(val string) IntOrString { return IntOrString{Type: String, StrVal: val} } -// Parse the given string and try to convert it to an integer before +// Parse the given string and try to convert it to an int32 integer before // setting it as a string value. func Parse(val string) IntOrString { - i, err := strconv.Atoi(val) + i, err := strconv.ParseInt(val, 10, 32) if err != nil { return FromString(val) } - return FromInt(i) + return FromInt32(int32(i)) } // UnmarshalJSON implements the json.Unmarshaller interface. 
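intstr.Parse now goes through ParseInt with a 32-bit limit, so numerics outside the int32 range become string values instead of being truncated by Atoi on 32-bit builds. For example:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// Values that fit in 32 bits still parse as integers.
	fmt.Println(intstr.Parse("8080").Type == intstr.Int) // true

	// Values outside the int32 range fall back to the string form.
	v := intstr.Parse("3000000000")
	fmt.Println(v.Type == intstr.String, v.StrVal) // true 3000000000
}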
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go index 2112c9ab7e..786ad991c2 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go @@ -25,6 +25,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/structured-merge-diff/v4/fieldpath" "sigs.k8s.io/structured-merge-diff/v4/merge" + "sigs.k8s.io/structured-merge-diff/v4/typed" ) type structuredMergeManager struct { @@ -95,11 +96,11 @@ func (f *structuredMergeManager) Update(liveObj, newObj runtime.Object, managed if err != nil { return nil, nil, fmt.Errorf("failed to convert live object (%v) to proper version: %v", objectGVKNN(liveObj), err) } - newObjTyped, err := f.typeConverter.ObjectToTyped(newObjVersioned) + newObjTyped, err := f.typeConverter.ObjectToTyped(newObjVersioned, typed.AllowDuplicates) if err != nil { return nil, nil, fmt.Errorf("failed to convert new object (%v) to smd typed: %v", objectGVKNN(newObjVersioned), err) } - liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned) + liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned, typed.AllowDuplicates) if err != nil { return nil, nil, fmt.Errorf("failed to convert live object (%v) to smd typed: %v", objectGVKNN(liveObjVersioned), err) } @@ -139,11 +140,13 @@ func (f *structuredMergeManager) Apply(liveObj, patchObj runtime.Object, managed return nil, nil, fmt.Errorf("failed to convert live object (%v) to proper version: %v", objectGVKNN(liveObj), err) } + // Don't allow duplicates in the applied object. patchObjTyped, err := f.typeConverter.ObjectToTyped(patchObj) if err != nil { return nil, nil, fmt.Errorf("failed to create typed patch object (%v): %v", objectGVKNN(patchObj), err) } - liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned) + + liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned, typed.AllowDuplicates) if err != nil { return nil, nil, fmt.Errorf("failed to create typed live object (%v): %v", objectGVKNN(liveObjVersioned), err) } diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/typeconverter.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/typeconverter.go index 1ac96d7f7b..c6449467cf 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/typeconverter.go +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/typeconverter.go @@ -32,7 +32,7 @@ import ( // TypeConverter allows you to convert from runtime.Object to // typed.TypedValue and the other way around. 
type TypeConverter interface { - ObjectToTyped(runtime.Object) (*typed.TypedValue, error) + ObjectToTyped(runtime.Object, ...typed.ValidationOptions) (*typed.TypedValue, error) TypedToObject(*typed.TypedValue) (runtime.Object, error) } @@ -54,7 +54,7 @@ func NewTypeConverter(openapiSpec map[string]*spec.Schema, preserveUnknownFields return &typeConverter{parser: tr}, nil } -func (c *typeConverter) ObjectToTyped(obj runtime.Object) (*typed.TypedValue, error) { +func (c *typeConverter) ObjectToTyped(obj runtime.Object, opts ...typed.ValidationOptions) (*typed.TypedValue, error) { gvk := obj.GetObjectKind().GroupVersionKind() t := c.parser[gvk] if t == nil { @@ -62,9 +62,9 @@ func (c *typeConverter) ObjectToTyped(obj runtime.Object) (*typed.TypedValue, er } switch o := obj.(type) { case *unstructured.Unstructured: - return t.FromUnstructured(o.UnstructuredContent()) + return t.FromUnstructured(o.UnstructuredContent(), opts...) default: - return t.FromStructured(obj) + return t.FromStructured(obj, opts...) } } @@ -84,12 +84,12 @@ func NewDeducedTypeConverter() TypeConverter { } // ObjectToTyped converts an object into a TypedValue with a "deduced type". -func (deducedTypeConverter) ObjectToTyped(obj runtime.Object) (*typed.TypedValue, error) { +func (deducedTypeConverter) ObjectToTyped(obj runtime.Object, opts ...typed.ValidationOptions) (*typed.TypedValue, error) { switch o := obj.(type) { case *unstructured.Unstructured: - return typed.DeducedParseableType.FromUnstructured(o.UnstructuredContent()) + return typed.DeducedParseableType.FromUnstructured(o.UnstructuredContent(), opts...) default: - return typed.DeducedParseableType.FromStructured(obj) + return typed.DeducedParseableType.FromStructured(obj, opts...) } } diff --git a/vendor/k8s.io/apimachinery/pkg/util/proxy/dial.go b/vendor/k8s.io/apimachinery/pkg/util/proxy/dial.go new file mode 100644 index 0000000000..e5196d1ee8 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/proxy/dial.go @@ -0,0 +1,122 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package proxy + +import ( + "context" + "crypto/tls" + "fmt" + "net" + "net/http" + "net/url" + + utilnet "k8s.io/apimachinery/pkg/util/net" + "k8s.io/apimachinery/third_party/forked/golang/netutil" + "k8s.io/klog/v2" +) + +// DialURL will dial the specified URL using the underlying dialer held by the passed +// RoundTripper. The primary use of this method is to support proxying upgradable connections. +// For this reason this method will prefer to negotiate http/1.1 if the URL scheme is https. 
+// If you wish to ensure ALPN negotiates http2 then set NextProto=[]string{"http2"} in the +// TLSConfig of the http.Transport +func DialURL(ctx context.Context, url *url.URL, transport http.RoundTripper) (net.Conn, error) { + dialAddr := netutil.CanonicalAddr(url) + + dialer, err := utilnet.DialerFor(transport) + if err != nil { + klog.V(5).Infof("Unable to unwrap transport %T to get dialer: %v", transport, err) + } + + switch url.Scheme { + case "http": + if dialer != nil { + return dialer(ctx, "tcp", dialAddr) + } + var d net.Dialer + return d.DialContext(ctx, "tcp", dialAddr) + case "https": + // Get the tls config from the transport if we recognize it + tlsConfig, err := utilnet.TLSClientConfig(transport) + if err != nil { + klog.V(5).Infof("Unable to unwrap transport %T to get at TLS config: %v", transport, err) + } + + if dialer != nil { + // We have a dialer; use it to open the connection, then + // create a tls client using the connection. + netConn, err := dialer(ctx, "tcp", dialAddr) + if err != nil { + return nil, err + } + if tlsConfig == nil { + // tls.Client requires non-nil config + klog.Warning("using custom dialer with no TLSClientConfig. Defaulting to InsecureSkipVerify") + // tls.Handshake() requires ServerName or InsecureSkipVerify + tlsConfig = &tls.Config{ + InsecureSkipVerify: true, + } + } else if len(tlsConfig.ServerName) == 0 && !tlsConfig.InsecureSkipVerify { + // tls.HandshakeContext() requires ServerName or InsecureSkipVerify + // infer the ServerName from the hostname we're connecting to. + inferredHost := dialAddr + if host, _, err := net.SplitHostPort(dialAddr); err == nil { + inferredHost = host + } + // Make a copy to avoid polluting the provided config + tlsConfigCopy := tlsConfig.Clone() + tlsConfigCopy.ServerName = inferredHost + tlsConfig = tlsConfigCopy + } + + // Since this method is primarily used within a "Connection: Upgrade" call we assume the caller is + // going to write HTTP/1.1 request to the wire. http2 should not be allowed in the TLSConfig.NextProtos, + // so we explicitly set that here. We only do this check if the TLSConfig support http/1.1. + if supportsHTTP11(tlsConfig.NextProtos) { + tlsConfig = tlsConfig.Clone() + tlsConfig.NextProtos = []string{"http/1.1"} + } + + tlsConn := tls.Client(netConn, tlsConfig) + if err := tlsConn.HandshakeContext(ctx); err != nil { + netConn.Close() + return nil, err + } + return tlsConn, nil + } else { + // Dial. + tlsDialer := tls.Dialer{ + Config: tlsConfig, + } + return tlsDialer.DialContext(ctx, "tcp", dialAddr) + } + default: + return nil, fmt.Errorf("unknown scheme: %s", url.Scheme) + } +} + +func supportsHTTP11(nextProtos []string) bool { + if len(nextProtos) == 0 { + return true + } + for _, proto := range nextProtos { + if proto == "http/1.1" { + return true + } + } + return false +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/proxy/doc.go b/vendor/k8s.io/apimachinery/pkg/util/proxy/doc.go new file mode 100644 index 0000000000..d14ecfad54 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/proxy/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package proxy provides transport and upgrade support for proxies. +package proxy // import "k8s.io/apimachinery/pkg/util/proxy" diff --git a/vendor/k8s.io/apimachinery/pkg/util/proxy/transport.go b/vendor/k8s.io/apimachinery/pkg/util/proxy/transport.go new file mode 100644 index 0000000000..5a2dd6e14c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/proxy/transport.go @@ -0,0 +1,272 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package proxy + +import ( + "bytes" + "compress/flate" + "compress/gzip" + "fmt" + "io" + "net/http" + "net/url" + "path" + "strings" + + "golang.org/x/net/html" + "golang.org/x/net/html/atom" + "k8s.io/klog/v2" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/net" + "k8s.io/apimachinery/pkg/util/sets" +) + +// atomsToAttrs states which attributes of which tags require URL substitution. +// Sources: http://www.w3.org/TR/REC-html40/index/attributes.html +// +// http://www.w3.org/html/wg/drafts/html/master/index.html#attributes-1 +var atomsToAttrs = map[atom.Atom]sets.String{ + atom.A: sets.NewString("href"), + atom.Applet: sets.NewString("codebase"), + atom.Area: sets.NewString("href"), + atom.Audio: sets.NewString("src"), + atom.Base: sets.NewString("href"), + atom.Blockquote: sets.NewString("cite"), + atom.Body: sets.NewString("background"), + atom.Button: sets.NewString("formaction"), + atom.Command: sets.NewString("icon"), + atom.Del: sets.NewString("cite"), + atom.Embed: sets.NewString("src"), + atom.Form: sets.NewString("action"), + atom.Frame: sets.NewString("longdesc", "src"), + atom.Head: sets.NewString("profile"), + atom.Html: sets.NewString("manifest"), + atom.Iframe: sets.NewString("longdesc", "src"), + atom.Img: sets.NewString("longdesc", "src", "usemap"), + atom.Input: sets.NewString("src", "usemap", "formaction"), + atom.Ins: sets.NewString("cite"), + atom.Link: sets.NewString("href"), + atom.Object: sets.NewString("classid", "codebase", "data", "usemap"), + atom.Q: sets.NewString("cite"), + atom.Script: sets.NewString("src"), + atom.Source: sets.NewString("src"), + atom.Video: sets.NewString("poster", "src"), + + // TODO: css URLs hidden in style elements. +} + +// Transport is a transport for text/html content that replaces URLs in html +// content with the prefix of the proxy server +type Transport struct { + Scheme string + Host string + PathPrepend string + + http.RoundTripper +} + +// RoundTrip implements the http.RoundTripper interface +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + // Add reverse proxy headers. 
+ forwardedURI := path.Join(t.PathPrepend, req.URL.EscapedPath()) + if strings.HasSuffix(req.URL.Path, "/") { + forwardedURI = forwardedURI + "/" + } + req.Header.Set("X-Forwarded-Uri", forwardedURI) + if len(t.Host) > 0 { + req.Header.Set("X-Forwarded-Host", t.Host) + } + if len(t.Scheme) > 0 { + req.Header.Set("X-Forwarded-Proto", t.Scheme) + } + + rt := t.RoundTripper + if rt == nil { + rt = http.DefaultTransport + } + resp, err := rt.RoundTrip(req) + + if err != nil { + return nil, errors.NewServiceUnavailable(fmt.Sprintf("error trying to reach service: %v", err)) + } + + if redirect := resp.Header.Get("Location"); redirect != "" { + targetURL, err := url.Parse(redirect) + if err != nil { + return nil, errors.NewInternalError(fmt.Errorf("error trying to parse Location header: %v", err)) + } + resp.Header.Set("Location", t.rewriteURL(targetURL, req.URL, req.Host)) + return resp, nil + } + + cType := resp.Header.Get("Content-Type") + cType = strings.TrimSpace(strings.SplitN(cType, ";", 2)[0]) + if cType != "text/html" { + // Do nothing, simply pass through + return resp, nil + } + + return t.rewriteResponse(req, resp) +} + +var _ = net.RoundTripperWrapper(&Transport{}) + +func (rt *Transport) WrappedRoundTripper() http.RoundTripper { + return rt.RoundTripper +} + +// rewriteURL rewrites a single URL to go through the proxy, if the URL refers +// to the same host as sourceURL, which is the page on which the target URL +// occurred, or if the URL matches the sourceRequestHost. +func (t *Transport) rewriteURL(url *url.URL, sourceURL *url.URL, sourceRequestHost string) string { + // Example: + // When API server processes a proxy request to a service (e.g. /api/v1/namespace/foo/service/bar/proxy/), + // the sourceURL.Host (i.e. req.URL.Host) is the endpoint IP address of the service. The + // sourceRequestHost (i.e. req.Host) is the Host header that specifies the host on which the + // URL is sought, which can be different from sourceURL.Host. For example, if user sends the + // request through "kubectl proxy" locally (i.e. localhost:8001/api/v1/namespace/foo/service/bar/proxy/), + // sourceRequestHost is "localhost:8001". + // + // If the service's response URL contains non-empty host, and url.Host is equal to either sourceURL.Host + // or sourceRequestHost, we should not consider the returned URL to be a completely different host. + // It's the API server's responsibility to rewrite a same-host-and-absolute-path URL and append the + // necessary URL prefix (i.e. /api/v1/namespace/foo/service/bar/proxy/). + isDifferentHost := url.Host != "" && url.Host != sourceURL.Host && url.Host != sourceRequestHost + isRelative := !strings.HasPrefix(url.Path, "/") + if isDifferentHost || isRelative { + return url.String() + } + + // Do not rewrite scheme and host if the Transport has empty scheme and host + // when targetURL already contains the sourceRequestHost + if !(url.Host == sourceRequestHost && t.Scheme == "" && t.Host == "") { + url.Scheme = t.Scheme + url.Host = t.Host + } + + origPath := url.Path + // Do not rewrite URL if the sourceURL already contains the necessary prefix. + if strings.HasPrefix(url.Path, t.PathPrepend) { + return url.String() + } + url.Path = path.Join(t.PathPrepend, url.Path) + if strings.HasSuffix(origPath, "/") { + // Add back the trailing slash, which was stripped by path.Join(). + url.Path += "/" + } + + return url.String() +} + +// rewriteHTML scans the HTML for tags with url-valued attributes, and updates +// those values with the urlRewriter function. 
The updated HTML is output to the +// writer. +func rewriteHTML(reader io.Reader, writer io.Writer, urlRewriter func(*url.URL) string) error { + // Note: This assumes the content is UTF-8. + tokenizer := html.NewTokenizer(reader) + + var err error + for err == nil { + tokenType := tokenizer.Next() + switch tokenType { + case html.ErrorToken: + err = tokenizer.Err() + case html.StartTagToken, html.SelfClosingTagToken: + token := tokenizer.Token() + if urlAttrs, ok := atomsToAttrs[token.DataAtom]; ok { + for i, attr := range token.Attr { + if urlAttrs.Has(attr.Key) { + url, err := url.Parse(attr.Val) + if err != nil { + // Do not rewrite the URL if it isn't valid. It is intended not + // to error here to prevent the inability to understand the + // content of the body to cause a fatal error. + continue + } + token.Attr[i].Val = urlRewriter(url) + } + } + } + _, err = writer.Write([]byte(token.String())) + default: + _, err = writer.Write(tokenizer.Raw()) + } + } + if err != io.EOF { + return err + } + return nil +} + +// rewriteResponse modifies an HTML response by updating absolute links referring +// to the original host to instead refer to the proxy transport. +func (t *Transport) rewriteResponse(req *http.Request, resp *http.Response) (*http.Response, error) { + origBody := resp.Body + defer origBody.Close() + + newContent := &bytes.Buffer{} + var reader io.Reader = origBody + var writer io.Writer = newContent + encoding := resp.Header.Get("Content-Encoding") + switch encoding { + case "gzip": + var err error + reader, err = gzip.NewReader(reader) + if err != nil { + return nil, fmt.Errorf("errorf making gzip reader: %v", err) + } + gzw := gzip.NewWriter(writer) + defer gzw.Close() + writer = gzw + case "deflate": + var err error + reader = flate.NewReader(reader) + flw, err := flate.NewWriter(writer, flate.BestCompression) + if err != nil { + return nil, fmt.Errorf("errorf making flate writer: %v", err) + } + defer func() { + flw.Close() + flw.Flush() + }() + writer = flw + case "": + // This is fine + default: + // Some encoding we don't understand-- don't try to parse this + klog.Errorf("Proxy encountered encoding %v for text/html; can't understand this so not fixing links.", encoding) + return resp, nil + } + + urlRewriter := func(targetUrl *url.URL) string { + return t.rewriteURL(targetUrl, req.URL, req.Host) + } + err := rewriteHTML(reader, writer, urlRewriter) + if err != nil { + klog.Errorf("Failed to rewrite URLs: %v", err) + return resp, err + } + + resp.Body = io.NopCloser(newContent) + // Update header node with new content-length + // TODO: Remove any hash/signature headers here? + resp.Header.Del("Content-Length") + resp.ContentLength = int64(newContent.Len()) + + return resp, err +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go b/vendor/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go new file mode 100644 index 0000000000..76acdfb4ac --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go @@ -0,0 +1,556 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package proxy + +import ( + "bufio" + "bytes" + "fmt" + "io" + "log" + "net" + "net/http" + "net/http/httputil" + "net/url" + "os" + "strings" + "time" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/httpstream" + utilnet "k8s.io/apimachinery/pkg/util/net" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + + "github.com/mxk/go-flowrate/flowrate" + "k8s.io/klog/v2" +) + +// UpgradeRequestRoundTripper provides an additional method to decorate a request +// with any authentication or other protocol level information prior to performing +// an upgrade on the server. Any response will be handled by the intercepting +// proxy. +type UpgradeRequestRoundTripper interface { + http.RoundTripper + // WrapRequest takes a valid HTTP request and returns a suitably altered version + // of request with any HTTP level values required to complete the request half of + // an upgrade on the server. It does not get a chance to see the response and + // should bypass any request side logic that expects to see the response. + WrapRequest(*http.Request) (*http.Request, error) +} + +// UpgradeAwareHandler is a handler for proxy requests that may require an upgrade +type UpgradeAwareHandler struct { + // UpgradeRequired will reject non-upgrade connections if true. + UpgradeRequired bool + // Location is the location of the upstream proxy. It is used as the location to Dial on the upstream server + // for upgrade requests unless UseRequestLocationOnUpgrade is true. + Location *url.URL + // AppendLocationPath determines if the original path of the Location should be appended to the upstream proxy request path + AppendLocationPath bool + // Transport provides an optional round tripper to use to proxy. If nil, the default proxy transport is used + Transport http.RoundTripper + // UpgradeTransport, if specified, will be used as the backend transport when upgrade requests are provided. + // This allows clients to disable HTTP/2. + UpgradeTransport UpgradeRequestRoundTripper + // WrapTransport indicates whether the provided Transport should be wrapped with default proxy transport behavior (URL rewriting, X-Forwarded-* header setting) + WrapTransport bool + // UseRequestLocation will use the incoming request URL when talking to the backend server. + UseRequestLocation bool + // UseLocationHost overrides the HTTP host header in requests to the backend server to use the Host from Location. + // This will override the req.Host field of a request, while UseRequestLocation will override the req.URL field + // of a request. The req.URL.Host specifies the server to connect to, while the req.Host field + // specifies the Host header value to send in the HTTP request. If this is false, the incoming req.Host header will + // just be forwarded to the backend server. + UseLocationHost bool + // FlushInterval controls how often the standard HTTP proxy will flush content from the upstream. + FlushInterval time.Duration + // MaxBytesPerSec controls the maximum rate for an upstream connection. No rate is imposed if the value is zero. + MaxBytesPerSec int64 + // Responder is passed errors that occur while setting up proxying. 
+ Responder ErrorResponder + // Reject to forward redirect response + RejectForwardingRedirects bool +} + +const defaultFlushInterval = 200 * time.Millisecond + +// ErrorResponder abstracts error reporting to the proxy handler to remove the need to hardcode a particular +// error format. +type ErrorResponder interface { + Error(w http.ResponseWriter, req *http.Request, err error) +} + +// SimpleErrorResponder is the legacy implementation of ErrorResponder for callers that only +// service a single request/response per proxy. +type SimpleErrorResponder interface { + Error(err error) +} + +func NewErrorResponder(r SimpleErrorResponder) ErrorResponder { + return simpleResponder{r} +} + +type simpleResponder struct { + responder SimpleErrorResponder +} + +func (r simpleResponder) Error(w http.ResponseWriter, req *http.Request, err error) { + r.responder.Error(err) +} + +// upgradeRequestRoundTripper implements proxy.UpgradeRequestRoundTripper. +type upgradeRequestRoundTripper struct { + http.RoundTripper + upgrader http.RoundTripper +} + +var ( + _ UpgradeRequestRoundTripper = &upgradeRequestRoundTripper{} + _ utilnet.RoundTripperWrapper = &upgradeRequestRoundTripper{} +) + +// WrappedRoundTripper returns the round tripper that a caller would use. +func (rt *upgradeRequestRoundTripper) WrappedRoundTripper() http.RoundTripper { + return rt.RoundTripper +} + +// WriteToRequest calls the nested upgrader and then copies the returned request +// fields onto the passed request. +func (rt *upgradeRequestRoundTripper) WrapRequest(req *http.Request) (*http.Request, error) { + resp, err := rt.upgrader.RoundTrip(req) + if err != nil { + return nil, err + } + return resp.Request, nil +} + +// onewayRoundTripper captures the provided request - which is assumed to have +// been modified by other round trippers - and then returns a fake response. +type onewayRoundTripper struct{} + +// RoundTrip returns a simple 200 OK response that captures the provided request. +func (onewayRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + return &http.Response{ + Status: "200 OK", + StatusCode: http.StatusOK, + Body: io.NopCloser(&bytes.Buffer{}), + Request: req, + }, nil +} + +// MirrorRequest is a round tripper that can be called to get back the calling request as +// the core round tripper in a chain. +var MirrorRequest http.RoundTripper = onewayRoundTripper{} + +// NewUpgradeRequestRoundTripper takes two round trippers - one for the underlying TCP connection, and +// one that is able to write headers to an HTTP request. The request rt is used to set the request headers +// and that is written to the underlying connection rt. +func NewUpgradeRequestRoundTripper(connection, request http.RoundTripper) UpgradeRequestRoundTripper { + return &upgradeRequestRoundTripper{ + RoundTripper: connection, + upgrader: request, + } +} + +// normalizeLocation returns the result of parsing the full URL, with scheme set to http if missing +func normalizeLocation(location *url.URL) *url.URL { + normalized, _ := url.Parse(location.String()) + if len(normalized.Scheme) == 0 { + normalized.Scheme = "http" + } + return normalized +} + +// NewUpgradeAwareHandler creates a new proxy handler with a default flush interval. Responder is required for returning +// errors to the caller. 
+func NewUpgradeAwareHandler(location *url.URL, transport http.RoundTripper, wrapTransport, upgradeRequired bool, responder ErrorResponder) *UpgradeAwareHandler { + return &UpgradeAwareHandler{ + Location: normalizeLocation(location), + Transport: transport, + WrapTransport: wrapTransport, + UpgradeRequired: upgradeRequired, + FlushInterval: defaultFlushInterval, + Responder: responder, + } +} + +func proxyRedirectsforRootPath(path string, w http.ResponseWriter, req *http.Request) bool { + redirect := false + method := req.Method + + // From pkg/genericapiserver/endpoints/handlers/proxy.go#ServeHTTP: + // Redirect requests with an empty path to a location that ends with a '/' + // This is essentially a hack for https://issue.k8s.io/4958. + // Note: Keep this code after tryUpgrade to not break that flow. + if len(path) == 0 && (method == http.MethodGet || method == http.MethodHead) { + var queryPart string + if len(req.URL.RawQuery) > 0 { + queryPart = "?" + req.URL.RawQuery + } + w.Header().Set("Location", req.URL.Path+"/"+queryPart) + w.WriteHeader(http.StatusMovedPermanently) + redirect = true + } + return redirect +} + +// ServeHTTP handles the proxy request +func (h *UpgradeAwareHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + if h.tryUpgrade(w, req) { + return + } + if h.UpgradeRequired { + h.Responder.Error(w, req, errors.NewBadRequest("Upgrade request required")) + return + } + + loc := *h.Location + loc.RawQuery = req.URL.RawQuery + + // If original request URL ended in '/', append a '/' at the end of the + // of the proxy URL + if !strings.HasSuffix(loc.Path, "/") && strings.HasSuffix(req.URL.Path, "/") { + loc.Path += "/" + } + + proxyRedirect := proxyRedirectsforRootPath(loc.Path, w, req) + if proxyRedirect { + return + } + + if h.Transport == nil || h.WrapTransport { + h.Transport = h.defaultProxyTransport(req.URL, h.Transport) + } + + // WithContext creates a shallow clone of the request with the same context. + newReq := req.WithContext(req.Context()) + newReq.Header = utilnet.CloneHeader(req.Header) + if !h.UseRequestLocation { + newReq.URL = &loc + } + if h.UseLocationHost { + // exchanging req.Host with the backend location is necessary for backends that act on the HTTP host header (e.g. 
API gateways), + // because req.Host has preference over req.URL.Host in filling this header field + newReq.Host = h.Location.Host + } + + // create the target location to use for the reverse proxy + reverseProxyLocation := &url.URL{Scheme: h.Location.Scheme, Host: h.Location.Host} + if h.AppendLocationPath { + reverseProxyLocation.Path = h.Location.Path + } + + proxy := httputil.NewSingleHostReverseProxy(reverseProxyLocation) + proxy.Transport = h.Transport + proxy.FlushInterval = h.FlushInterval + proxy.ErrorLog = log.New(noSuppressPanicError{}, "", log.LstdFlags) + if h.RejectForwardingRedirects { + oldModifyResponse := proxy.ModifyResponse + proxy.ModifyResponse = func(response *http.Response) error { + code := response.StatusCode + if code >= 300 && code <= 399 && len(response.Header.Get("Location")) > 0 { + // close the original response + response.Body.Close() + msg := "the backend attempted to redirect this request, which is not permitted" + // replace the response + *response = http.Response{ + StatusCode: http.StatusBadGateway, + Status: fmt.Sprintf("%d %s", response.StatusCode, http.StatusText(response.StatusCode)), + Body: io.NopCloser(strings.NewReader(msg)), + ContentLength: int64(len(msg)), + } + } else { + if oldModifyResponse != nil { + if err := oldModifyResponse(response); err != nil { + return err + } + } + } + return nil + } + } + if h.Responder != nil { + // if an optional error interceptor/responder was provided wire it + // the custom responder might be used for providing a unified error reporting + // or supporting retry mechanisms by not sending non-fatal errors to the clients + proxy.ErrorHandler = h.Responder.Error + } + proxy.ServeHTTP(w, newReq) +} + +type noSuppressPanicError struct{} + +func (noSuppressPanicError) Write(p []byte) (n int, err error) { + // skip "suppressing panic for copyResponse error in test; copy error" error message + // that ends up in CI tests on each kube-apiserver termination as noise and + // everybody thinks this is fatal. + if strings.Contains(string(p), "suppressing panic") { + return len(p), nil + } + return os.Stderr.Write(p) +} + +// tryUpgrade returns true if the request was handled. +func (h *UpgradeAwareHandler) tryUpgrade(w http.ResponseWriter, req *http.Request) bool { + if !httpstream.IsUpgradeRequest(req) { + klog.V(6).Infof("Request was not an upgrade") + return false + } + + var ( + backendConn net.Conn + rawResponse []byte + err error + ) + + location := *h.Location + if h.UseRequestLocation { + location = *req.URL + location.Scheme = h.Location.Scheme + location.Host = h.Location.Host + if h.AppendLocationPath { + location.Path = singleJoiningSlash(h.Location.Path, location.Path) + } + } + + clone := utilnet.CloneRequest(req) + // Only append X-Forwarded-For in the upgrade path, since httputil.NewSingleHostReverseProxy + // handles this in the non-upgrade path. 
+ utilnet.AppendForwardedForHeader(clone) + klog.V(6).Infof("Connecting to backend proxy (direct dial) %s\n Headers: %v", &location, clone.Header) + if h.UseLocationHost { + clone.Host = h.Location.Host + } + clone.URL = &location + backendConn, err = h.DialForUpgrade(clone) + if err != nil { + klog.V(6).Infof("Proxy connection error: %v", err) + h.Responder.Error(w, req, err) + return true + } + defer backendConn.Close() + + // determine the http response code from the backend by reading from rawResponse+backendConn + backendHTTPResponse, headerBytes, err := getResponse(io.MultiReader(bytes.NewReader(rawResponse), backendConn)) + if err != nil { + klog.V(6).Infof("Proxy connection error: %v", err) + h.Responder.Error(w, req, err) + return true + } + if len(headerBytes) > len(rawResponse) { + // we read beyond the bytes stored in rawResponse, update rawResponse to the full set of bytes read from the backend + rawResponse = headerBytes + } + + // If the backend did not upgrade the request, return an error to the client. If the response was + // an error, the error is forwarded directly after the connection is hijacked. Otherwise, just + // return a generic error here. + if backendHTTPResponse.StatusCode != http.StatusSwitchingProtocols && backendHTTPResponse.StatusCode < 400 { + err := fmt.Errorf("invalid upgrade response: status code %d", backendHTTPResponse.StatusCode) + klog.Errorf("Proxy upgrade error: %v", err) + h.Responder.Error(w, req, err) + return true + } + + // Once the connection is hijacked, the ErrorResponder will no longer work, so + // hijacking should be the last step in the upgrade. + requestHijacker, ok := w.(http.Hijacker) + if !ok { + klog.V(6).Infof("Unable to hijack response writer: %T", w) + h.Responder.Error(w, req, fmt.Errorf("request connection cannot be hijacked: %T", w)) + return true + } + requestHijackedConn, _, err := requestHijacker.Hijack() + if err != nil { + klog.V(6).Infof("Unable to hijack response: %v", err) + h.Responder.Error(w, req, fmt.Errorf("error hijacking connection: %v", err)) + return true + } + defer requestHijackedConn.Close() + + if backendHTTPResponse.StatusCode != http.StatusSwitchingProtocols { + // If the backend did not upgrade the request, echo the response from the backend to the client and return, closing the connection. + klog.V(6).Infof("Proxy upgrade error, status code %d", backendHTTPResponse.StatusCode) + // set read/write deadlines + deadline := time.Now().Add(10 * time.Second) + backendConn.SetReadDeadline(deadline) + requestHijackedConn.SetWriteDeadline(deadline) + // write the response to the client + err := backendHTTPResponse.Write(requestHijackedConn) + if err != nil && !strings.Contains(err.Error(), "use of closed network connection") { + klog.Errorf("Error proxying data from backend to client: %v", err) + } + // Indicate we handled the request + return true + } + + // Forward raw response bytes back to client. + if len(rawResponse) > 0 { + klog.V(6).Infof("Writing %d bytes to hijacked connection", len(rawResponse)) + if _, err = requestHijackedConn.Write(rawResponse); err != nil { + utilruntime.HandleError(fmt.Errorf("Error proxying response from backend to client: %v", err)) + } + } + + // Proxy the connection. This is bidirectional, so we need a goroutine + // to copy in each direction. Once one side of the connection exits, we + // exit the function which performs cleanup and in the process closes + // the other half of the connection in the defer. 
+ writerComplete := make(chan struct{}) + readerComplete := make(chan struct{}) + + go func() { + var writer io.WriteCloser + if h.MaxBytesPerSec > 0 { + writer = flowrate.NewWriter(backendConn, h.MaxBytesPerSec) + } else { + writer = backendConn + } + _, err := io.Copy(writer, requestHijackedConn) + if err != nil && !strings.Contains(err.Error(), "use of closed network connection") { + klog.Errorf("Error proxying data from client to backend: %v", err) + } + close(writerComplete) + }() + + go func() { + var reader io.ReadCloser + if h.MaxBytesPerSec > 0 { + reader = flowrate.NewReader(backendConn, h.MaxBytesPerSec) + } else { + reader = backendConn + } + _, err := io.Copy(requestHijackedConn, reader) + if err != nil && !strings.Contains(err.Error(), "use of closed network connection") { + klog.Errorf("Error proxying data from backend to client: %v", err) + } + close(readerComplete) + }() + + // Wait for one half the connection to exit. Once it does the defer will + // clean up the other half of the connection. + select { + case <-writerComplete: + case <-readerComplete: + } + klog.V(6).Infof("Disconnecting from backend proxy %s\n Headers: %v", &location, clone.Header) + + return true +} + +// FIXME: Taken from net/http/httputil/reverseproxy.go as singleJoiningSlash is not exported to be re-used. +// See-also: https://github.com/golang/go/issues/44290 +func singleJoiningSlash(a, b string) string { + aslash := strings.HasSuffix(a, "/") + bslash := strings.HasPrefix(b, "/") + switch { + case aslash && bslash: + return a + b[1:] + case !aslash && !bslash: + return a + "/" + b + } + return a + b +} + +func (h *UpgradeAwareHandler) DialForUpgrade(req *http.Request) (net.Conn, error) { + if h.UpgradeTransport == nil { + return dial(req, h.Transport) + } + updatedReq, err := h.UpgradeTransport.WrapRequest(req) + if err != nil { + return nil, err + } + return dial(updatedReq, h.UpgradeTransport) +} + +// getResponseCode reads a http response from the given reader, returns the response, +// the bytes read from the reader, and any error encountered +func getResponse(r io.Reader) (*http.Response, []byte, error) { + rawResponse := bytes.NewBuffer(make([]byte, 0, 256)) + // Save the bytes read while reading the response headers into the rawResponse buffer + resp, err := http.ReadResponse(bufio.NewReader(io.TeeReader(r, rawResponse)), nil) + if err != nil { + return nil, nil, err + } + // return the http response and the raw bytes consumed from the reader in the process + return resp, rawResponse.Bytes(), nil +} + +// dial dials the backend at req.URL and writes req to it. 
+func dial(req *http.Request, transport http.RoundTripper) (net.Conn, error) { + conn, err := DialURL(req.Context(), req.URL, transport) + if err != nil { + return nil, fmt.Errorf("error dialing backend: %v", err) + } + + if err = req.Write(conn); err != nil { + conn.Close() + return nil, fmt.Errorf("error sending request: %v", err) + } + + return conn, err +} + +func (h *UpgradeAwareHandler) defaultProxyTransport(url *url.URL, internalTransport http.RoundTripper) http.RoundTripper { + scheme := url.Scheme + host := url.Host + suffix := h.Location.Path + if strings.HasSuffix(url.Path, "/") && !strings.HasSuffix(suffix, "/") { + suffix += "/" + } + pathPrepend := strings.TrimSuffix(url.Path, suffix) + rewritingTransport := &Transport{ + Scheme: scheme, + Host: host, + PathPrepend: pathPrepend, + RoundTripper: internalTransport, + } + return &corsRemovingTransport{ + RoundTripper: rewritingTransport, + } +} + +// corsRemovingTransport is a wrapper for an internal transport. It removes CORS headers +// from the internal response. +// Implements pkg/util/net.RoundTripperWrapper +type corsRemovingTransport struct { + http.RoundTripper +} + +var _ = utilnet.RoundTripperWrapper(&corsRemovingTransport{}) + +func (rt *corsRemovingTransport) RoundTrip(req *http.Request) (*http.Response, error) { + resp, err := rt.RoundTripper.RoundTrip(req) + if err != nil { + return nil, err + } + removeCORSHeaders(resp) + return resp, nil +} + +func (rt *corsRemovingTransport) WrappedRoundTripper() http.RoundTripper { + return rt.RoundTripper +} + +// removeCORSHeaders strip CORS headers sent from the backend +// This should be called on all responses before returning +func removeCORSHeaders(resp *http.Response) { + resp.Header.Del("Access-Control-Allow-Credentials") + resp.Header.Del("Access-Control-Allow-Headers") + resp.Header.Del("Access-Control-Allow-Methods") + resp.Header.Del("Access-Control-Allow-Origin") +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go b/vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go index 237ebaef48..ba153ee24f 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go +++ b/vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go @@ -46,8 +46,22 @@ const ( // adds support for exit codes. StreamProtocolV4Name = "v4.channel.k8s.io" + // The subprotocol "v5.channel.k8s.io" is used for remote command + // attachment/execution. It is the 5th version of the subprotocol and + // adds support for a CLOSE signal. + StreamProtocolV5Name = "v5.channel.k8s.io" + NonZeroExitCodeReason = metav1.StatusReason("NonZeroExitCode") ExitCodeCauseType = metav1.CauseType("ExitCode") + + // RemoteCommand stream identifiers. The first three identifiers (for STDIN, + // STDOUT, STDERR) are the same as their file descriptors. 
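The newly vendored proxy package exposes UpgradeAwareHandler as a reusable upgrade-aware reverse proxy. A minimal, illustrative wiring; the listen address, backend URL, and responder implementation here are hypothetical:

package main

import (
	"log"
	"net/http"
	"net/url"

	"k8s.io/apimachinery/pkg/util/proxy"
)

// stderrResponder satisfies proxy.ErrorResponder by logging proxy setup
// errors and reporting them to the client.
type stderrResponder struct{}

func (stderrResponder) Error(w http.ResponseWriter, req *http.Request, err error) {
	log.Printf("proxy error for %s: %v", req.URL, err)
	http.Error(w, err.Error(), http.StatusBadGateway)
}

func main() {
	backend, _ := url.Parse("http://127.0.0.1:8080/")
	// wrapTransport=true applies the default URL-rewriting/X-Forwarded-* transport;
	// upgradeRequired=false lets plain HTTP requests through as well.
	handler := proxy.NewUpgradeAwareHandler(backend, nil, true, false, stderrResponder{})
	handler.MaxBytesPerSec = 1 << 20 // throttle upgraded (e.g. exec/attach) streams

	log.Fatal(http.ListenAndServe("127.0.0.1:8001", handler))
}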
+ StreamStdIn = 0 + StreamStdOut = 1 + StreamStdErr = 2 + StreamErr = 3 + StreamResize = 4 + StreamClose = 255 ) var SupportedStreamingProtocols = []string{StreamProtocolV4Name, StreamProtocolV3Name, StreamProtocolV2Name, StreamProtocolV1Name} diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go index df305b712c..85b0cfc072 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go @@ -20,12 +20,17 @@ import ( "errors" "fmt" "reflect" + "strings" "k8s.io/apimachinery/pkg/util/mergepatch" forkedjson "k8s.io/apimachinery/third_party/forked/golang/json" openapi "k8s.io/kube-openapi/pkg/util/proto" + "k8s.io/kube-openapi/pkg/validation/spec" ) +const patchMergeKey = "x-kubernetes-patch-merge-key" +const patchStrategy = "x-kubernetes-patch-strategy" + type PatchMeta struct { patchStrategies []string patchMergeKey string @@ -148,6 +153,90 @@ func GetTagStructTypeOrDie(dataStruct interface{}) reflect.Type { return t } +type PatchMetaFromOpenAPIV3 struct { + // SchemaList is required to resolve OpenAPI V3 references + SchemaList map[string]*spec.Schema + Schema *spec.Schema +} + +func (s PatchMetaFromOpenAPIV3) traverse(key string) (PatchMetaFromOpenAPIV3, error) { + if s.Schema == nil { + return PatchMetaFromOpenAPIV3{}, nil + } + if len(s.Schema.Properties) == 0 { + return PatchMetaFromOpenAPIV3{}, fmt.Errorf("unable to find api field \"%s\"", key) + } + subschema, ok := s.Schema.Properties[key] + if !ok { + return PatchMetaFromOpenAPIV3{}, fmt.Errorf("unable to find api field \"%s\"", key) + } + return PatchMetaFromOpenAPIV3{SchemaList: s.SchemaList, Schema: &subschema}, nil +} + +func resolve(l *PatchMetaFromOpenAPIV3) error { + if len(l.Schema.AllOf) > 0 { + l.Schema = &l.Schema.AllOf[0] + } + if refString := l.Schema.Ref.String(); refString != "" { + str := strings.TrimPrefix(refString, "#/components/schemas/") + sch, ok := l.SchemaList[str] + if ok { + l.Schema = sch + } else { + return fmt.Errorf("unable to resolve %s in OpenAPI V3", refString) + } + } + return nil +} + +func (s PatchMetaFromOpenAPIV3) LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error) { + l, err := s.traverse(key) + if err != nil { + return l, PatchMeta{}, err + } + p := PatchMeta{} + f, ok := l.Schema.Extensions[patchMergeKey] + if ok { + p.SetPatchMergeKey(f.(string)) + } + g, ok := l.Schema.Extensions[patchStrategy] + if ok { + p.SetPatchStrategies(strings.Split(g.(string), ",")) + } + + err = resolve(&l) + return l, p, err +} + +func (s PatchMetaFromOpenAPIV3) LookupPatchMetadataForSlice(key string) (LookupPatchMeta, PatchMeta, error) { + l, err := s.traverse(key) + if err != nil { + return l, PatchMeta{}, err + } + p := PatchMeta{} + f, ok := l.Schema.Extensions[patchMergeKey] + if ok { + p.SetPatchMergeKey(f.(string)) + } + g, ok := l.Schema.Extensions[patchStrategy] + if ok { + p.SetPatchStrategies(strings.Split(g.(string), ",")) + } + if l.Schema.Items != nil { + l.Schema = l.Schema.Items.Schema + } + err = resolve(&l) + return l, p, err +} + +func (s PatchMetaFromOpenAPIV3) Name() string { + schema := s.Schema + if len(schema.Type) > 0 { + return strings.Join(schema.Type, "") + } + return "Struct" +} + type PatchMetaFromOpenAPI struct { Schema openapi.Schema } diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go index ae73bda966..bc387d0116 
100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go @@ -200,12 +200,12 @@ func Invalid(field *Path, value interface{}, detail string) *Error { // NotSupported returns a *Error indicating "unsupported value". // This is used to report unknown values for enumerated fields (e.g. a list of // valid values). -func NotSupported(field *Path, value interface{}, validValues []string) *Error { +func NotSupported[T ~string](field *Path, value interface{}, validValues []T) *Error { detail := "" if len(validValues) > 0 { quotedValues := make([]string, len(validValues)) for i, v := range validValues { - quotedValues[i] = strconv.Quote(v) + quotedValues[i] = strconv.Quote(fmt.Sprint(v)) } detail = "supported values: " + strings.Join(quotedValues, ", ") } diff --git a/vendor/k8s.io/apimachinery/pkg/util/version/version.go b/vendor/k8s.io/apimachinery/pkg/util/version/version.go index 4c61956953..2292ba1376 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/version/version.go +++ b/vendor/k8s.io/apimachinery/pkg/util/version/version.go @@ -18,6 +18,7 @@ package version import ( "bytes" + "errors" "fmt" "regexp" "strconv" @@ -85,6 +86,47 @@ func parse(str string, semver bool) (*Version, error) { return v, nil } +// HighestSupportedVersion returns the highest supported version +// This function assumes that the highest supported version must be v1.x. +func HighestSupportedVersion(versions []string) (*Version, error) { + if len(versions) == 0 { + return nil, errors.New("empty array for supported versions") + } + + var ( + highestSupportedVersion *Version + theErr error + ) + + for i := len(versions) - 1; i >= 0; i-- { + currentHighestVer, err := ParseGeneric(versions[i]) + if err != nil { + theErr = err + continue + } + + if currentHighestVer.Major() > 1 { + continue + } + + if highestSupportedVersion == nil || highestSupportedVersion.LessThan(currentHighestVer) { + highestSupportedVersion = currentHighestVer + } + } + + if highestSupportedVersion == nil { + return nil, fmt.Errorf( + "could not find a highest supported version from versions (%v) reported: %+v", + versions, theErr) + } + + if highestSupportedVersion.Major() != 1 { + return nil, fmt.Errorf("highest supported version reported is %v, must be v1.x", highestSupportedVersion) + } + + return highestSupportedVersion, nil +} + // ParseGeneric parses a "generic" version string. 
The version string must consist of two // or more dot-separated numeric fields (the first of which can't have leading zeroes), // followed by arbitrary uninterpreted data (which need not be separated from the final diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go b/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go index 0dd13c626c..107bfc132f 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go @@ -40,6 +40,10 @@ func loopConditionUntilContext(ctx context.Context, t Timer, immediate, sliding var timeCh <-chan time.Time doneCh := ctx.Done() + if !sliding { + timeCh = t.C() + } + // if immediate is true the condition is // guaranteed to be executed at least once, // if we haven't requested immediate execution, delay once @@ -50,17 +54,27 @@ func loopConditionUntilContext(ctx context.Context, t Timer, immediate, sliding }(); err != nil || ok { return err } - } else { + } + + if sliding { timeCh = t.C() + } + + for { + + // Wait for either the context to be cancelled or the next invocation be called select { case <-doneCh: return ctx.Err() case <-timeCh: } - } - for { - // checking ctx.Err() is slightly faster than checking a select + // IMPORTANT: Because there is no channel priority selection in golang + // it is possible for very short timers to "win" the race in the previous select + // repeatedly even when the context has been canceled. We therefore must + // explicitly check for context cancellation on every loop and exit if true to + // guarantee that we don't invoke condition more than once after context has + // been cancelled. if err := ctx.Err(); err != nil { return err } @@ -77,21 +91,5 @@ func loopConditionUntilContext(ctx context.Context, t Timer, immediate, sliding if sliding { t.Next() } - - if timeCh == nil { - timeCh = t.C() - } - - // NOTE: b/c there is no priority selection in golang - // it is possible for this to race, meaning we could - // trigger t.C and doneCh, and t.C select falls through. - // In order to mitigate we re-check doneCh at the beginning - // of every loop to guarantee at-most one extra execution - // of condition. - select { - case <-doneCh: - return ctx.Err() - case <-timeCh: - } } } diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/config_flags.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/config_flags.go index 03f4b5332b..9c16835096 100644 --- a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/config_flags.go +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/config_flags.go @@ -27,6 +27,8 @@ import ( "github.com/spf13/pflag" "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/cli-runtime/pkg/genericiooptions" + "k8s.io/cli-runtime/pkg/printers" "k8s.io/client-go/discovery" diskcached "k8s.io/client-go/discovery/cached/disk" "k8s.io/client-go/rest" @@ -122,6 +124,9 @@ type ConfigFlags struct { // Allows increasing qps used for discovery, this is useful // in clusters with many registered resources discoveryQPS float32 + // Allows all possible warnings are printed in a standardized + // format. + warningPrinter *printers.WarningPrinter } // ToRESTConfig implements RESTClientGetter. 
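Aside: the apimachinery version package gains an exported HighestSupportedVersion helper in this bump (see the hunk above). A minimal sketch of how a caller might use it, assuming the usual import path k8s.io/apimachinery/pkg/util/version; the version slice is made up for illustration:

package main

import (
	"fmt"

	utilversion "k8s.io/apimachinery/pkg/util/version"
)

func main() {
	// Pick the newest v1.x entry; entries with a major version above 1 are
	// skipped, and an error is returned if nothing usable remains.
	v, err := utilversion.HighestSupportedVersion([]string{"0.3.0", "1.1.0", "1.2.0", "2.0.0"})
	if err != nil {
		fmt.Println("no usable version:", err)
		return
	}
	fmt.Println(v.String()) // 1.2.0
}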
@@ -332,7 +337,11 @@ func (f *ConfigFlags) toRESTMapper() (meta.RESTMapper, error) { } mapper := restmapper.NewDeferredDiscoveryRESTMapper(discoveryClient) - expander := restmapper.NewShortcutExpander(mapper, discoveryClient) + expander := restmapper.NewShortcutExpander(mapper, discoveryClient, func(a string) { + if f.warningPrinter != nil { + f.warningPrinter.Print(a) + } + }) return expander, nil } @@ -428,6 +437,12 @@ func (f *ConfigFlags) WithWrapConfigFn(wrapConfigFn func(*rest.Config) *rest.Con return f } +// WithWarningPrinter initializes WarningPrinter with the given IOStreams +func (f *ConfigFlags) WithWarningPrinter(ioStreams genericiooptions.IOStreams) *ConfigFlags { + f.warningPrinter = printers.NewWarningPrinter(ioStreams.ErrOut, printers.WarningPrinterOptions{Color: printers.AllowsColorOutput(ioStreams.ErrOut)}) + return f +} + // NewConfigFlags returns ConfigFlags with default values set func NewConfigFlags(usePersistentConfig bool) *ConfigFlags { impersonateGroup := []string{} diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/config_flags_fake.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/config_flags_fake.go index 7a96481552..9bb5e28c94 100644 --- a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/config_flags_fake.go +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/config_flags_fake.go @@ -66,7 +66,7 @@ func (f *TestConfigFlags) ToRESTMapper() (meta.RESTMapper, error) { } if f.discoveryClient != nil { mapper := restmapper.NewDeferredDiscoveryRESTMapper(f.discoveryClient) - expander := restmapper.NewShortcutExpander(mapper, f.discoveryClient) + expander := restmapper.NewShortcutExpander(mapper, f.discoveryClient, nil) return expander, nil } return nil, fmt.Errorf("no restmapper") diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/io_options.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/io_options.go index 3d3e20a2f5..b831351398 100644 --- a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/io_options.go +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/io_options.go @@ -18,7 +18,7 @@ package genericclioptions import ( "bytes" - "io/ioutil" + "io" "k8s.io/cli-runtime/pkg/genericiooptions" ) @@ -48,7 +48,7 @@ func NewTestIOStreamsDiscard() genericiooptions.IOStreams { in := &bytes.Buffer{} return IOStreams{ In: in, - Out: ioutil.Discard, - ErrOut: ioutil.Discard, + Out: io.Discard, + ErrOut: io.Discard, } } diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/jsonpath_flags.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/jsonpath_flags.go index 06bef474a3..a3d86d7619 100644 --- a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/jsonpath_flags.go +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/jsonpath_flags.go @@ -18,7 +18,7 @@ package genericclioptions import ( "fmt" - "io/ioutil" + "os" "sort" "strings" @@ -88,7 +88,7 @@ func (f *JSONPathPrintFlags) ToPrinter(templateFormat string) (printers.Resource } if templateFormat == "jsonpath-file" { - data, err := ioutil.ReadFile(templateValue) + data, err := os.ReadFile(templateValue) if err != nil { return nil, fmt.Errorf("error reading --template %s, %v", templateValue, err) } diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/template_flags.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/template_flags.go index 4502061ec4..9d670daa44 100644 --- a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/template_flags.go +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/template_flags.go @@ -18,7 +18,7 @@ package genericclioptions import ( "fmt" - "io/ioutil" + 
"os" "sort" "strings" @@ -89,7 +89,7 @@ func (f *GoTemplatePrintFlags) ToPrinter(templateFormat string) (printers.Resour } if templateFormat == "templatefile" || templateFormat == "go-template-file" { - data, err := ioutil.ReadFile(templateValue) + data, err := os.ReadFile(templateValue) if err != nil { return nil, fmt.Errorf("error reading --template %s, %v", templateValue, err) } diff --git a/vendor/k8s.io/cli-runtime/pkg/printers/terminal.go b/vendor/k8s.io/cli-runtime/pkg/printers/terminal.go index 5a59491e49..9dc904e59c 100644 --- a/vendor/k8s.io/cli-runtime/pkg/printers/terminal.go +++ b/vendor/k8s.io/cli-runtime/pkg/printers/terminal.go @@ -18,7 +18,11 @@ package printers import ( "io" + "os" + "runtime" "strings" + + "github.com/moby/term" ) // terminalEscaper replaces ANSI escape sequences and other terminal special @@ -37,3 +41,35 @@ func WriteEscaped(writer io.Writer, output string) error { func EscapeTerminal(in string) string { return terminalEscaper.Replace(in) } + +// IsTerminal returns whether the passed object is a terminal or not +func IsTerminal(i interface{}) bool { + _, terminal := term.GetFdInfo(i) + return terminal +} + +// AllowsColorOutput returns true if the specified writer is a terminal and +// the process environment indicates color output is supported and desired. +func AllowsColorOutput(w io.Writer) bool { + if !IsTerminal(w) { + return false + } + + // https://en.wikipedia.org/wiki/Computer_terminal#Dumb_terminals + if os.Getenv("TERM") == "dumb" { + return false + } + + // https://no-color.org/ + if _, nocolor := os.LookupEnv("NO_COLOR"); nocolor { + return false + } + + // On Windows WT_SESSION is set by the modern terminal component. + // Older terminals have poor support for UTF-8, VT escape codes, etc. + if runtime.GOOS == "windows" && os.Getenv("WT_SESSION") == "" { + return false + } + + return true +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/clustertrustbundleprojection.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/clustertrustbundleprojection.go new file mode 100644 index 0000000000..5aa686782b --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/clustertrustbundleprojection.go @@ -0,0 +1,79 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ClusterTrustBundleProjectionApplyConfiguration represents an declarative configuration of the ClusterTrustBundleProjection type for use +// with apply. 
+type ClusterTrustBundleProjectionApplyConfiguration struct { + Name *string `json:"name,omitempty"` + SignerName *string `json:"signerName,omitempty"` + LabelSelector *v1.LabelSelectorApplyConfiguration `json:"labelSelector,omitempty"` + Optional *bool `json:"optional,omitempty"` + Path *string `json:"path,omitempty"` +} + +// ClusterTrustBundleProjectionApplyConfiguration constructs an declarative configuration of the ClusterTrustBundleProjection type for use with +// apply. +func ClusterTrustBundleProjection() *ClusterTrustBundleProjectionApplyConfiguration { + return &ClusterTrustBundleProjectionApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ClusterTrustBundleProjectionApplyConfiguration) WithName(value string) *ClusterTrustBundleProjectionApplyConfiguration { + b.Name = &value + return b +} + +// WithSignerName sets the SignerName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SignerName field is set to the value of the last call. +func (b *ClusterTrustBundleProjectionApplyConfiguration) WithSignerName(value string) *ClusterTrustBundleProjectionApplyConfiguration { + b.SignerName = &value + return b +} + +// WithLabelSelector sets the LabelSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LabelSelector field is set to the value of the last call. +func (b *ClusterTrustBundleProjectionApplyConfiguration) WithLabelSelector(value *v1.LabelSelectorApplyConfiguration) *ClusterTrustBundleProjectionApplyConfiguration { + b.LabelSelector = value + return b +} + +// WithOptional sets the Optional field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Optional field is set to the value of the last call. +func (b *ClusterTrustBundleProjectionApplyConfiguration) WithOptional(value bool) *ClusterTrustBundleProjectionApplyConfiguration { + b.Optional = &value + return b +} + +// WithPath sets the Path field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Path field is set to the value of the last call. 
+func (b *ClusterTrustBundleProjectionApplyConfiguration) WithPath(value string) *ClusterTrustBundleProjectionApplyConfiguration { + b.Path = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go index 6e373dd4ed..e4ae9c49f7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go @@ -24,6 +24,7 @@ type LifecycleHandlerApplyConfiguration struct { Exec *ExecActionApplyConfiguration `json:"exec,omitempty"` HTTPGet *HTTPGetActionApplyConfiguration `json:"httpGet,omitempty"` TCPSocket *TCPSocketActionApplyConfiguration `json:"tcpSocket,omitempty"` + Sleep *SleepActionApplyConfiguration `json:"sleep,omitempty"` } // LifecycleHandlerApplyConfiguration constructs an declarative configuration of the LifecycleHandler type for use with @@ -55,3 +56,11 @@ func (b *LifecycleHandlerApplyConfiguration) WithTCPSocket(value *TCPSocketActio b.TCPSocket = value return b } + +// WithSleep sets the Sleep field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Sleep field is set to the value of the last call. +func (b *LifecycleHandlerApplyConfiguration) WithSleep(value *SleepActionApplyConfiguration) *LifecycleHandlerApplyConfiguration { + b.Sleep = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalanceringress.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalanceringress.go index 64d27bdad5..a48dac6810 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalanceringress.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalanceringress.go @@ -18,11 +18,16 @@ limitations under the License. package v1 +import ( + v1 "k8s.io/api/core/v1" +) + // LoadBalancerIngressApplyConfiguration represents an declarative configuration of the LoadBalancerIngress type for use // with apply. type LoadBalancerIngressApplyConfiguration struct { IP *string `json:"ip,omitempty"` Hostname *string `json:"hostname,omitempty"` + IPMode *v1.LoadBalancerIPMode `json:"ipMode,omitempty"` Ports []PortStatusApplyConfiguration `json:"ports,omitempty"` } @@ -48,6 +53,14 @@ func (b *LoadBalancerIngressApplyConfiguration) WithHostname(value string) *Load return b } +// WithIPMode sets the IPMode field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IPMode field is set to the value of the last call. +func (b *LoadBalancerIngressApplyConfiguration) WithIPMode(value v1.LoadBalancerIPMode) *LoadBalancerIngressApplyConfiguration { + b.IPMode = &value + return b +} + // WithPorts adds the given value to the Ports field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Ports field. 
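Aside: the core/v1 apply configurations above add a generated builder for the new ClusterTrustBundle projected volume source. A minimal sketch using only the setters shown in the diff, assuming the package is imported as corev1ac; the signer name and path are placeholder values:

package main

import (
	"fmt"

	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// Build the projection declaratively; each With* setter returns the
	// receiver so calls can be chained.
	bundle := corev1ac.ClusterTrustBundleProjection().
		WithSignerName("example.com/my-signer").
		WithPath("trust-bundle.pem").
		WithOptional(true)
	fmt.Printf("signer=%s path=%s\n", *bundle.SignerName, *bundle.Path)
}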
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/modifyvolumestatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/modifyvolumestatus.go new file mode 100644 index 0000000000..4ff1d040cf --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/modifyvolumestatus.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// ModifyVolumeStatusApplyConfiguration represents an declarative configuration of the ModifyVolumeStatus type for use +// with apply. +type ModifyVolumeStatusApplyConfiguration struct { + TargetVolumeAttributesClassName *string `json:"targetVolumeAttributesClassName,omitempty"` + Status *v1.PersistentVolumeClaimModifyVolumeStatus `json:"status,omitempty"` +} + +// ModifyVolumeStatusApplyConfiguration constructs an declarative configuration of the ModifyVolumeStatus type for use with +// apply. +func ModifyVolumeStatus() *ModifyVolumeStatusApplyConfiguration { + return &ModifyVolumeStatusApplyConfiguration{} +} + +// WithTargetVolumeAttributesClassName sets the TargetVolumeAttributesClassName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TargetVolumeAttributesClassName field is set to the value of the last call. +func (b *ModifyVolumeStatusApplyConfiguration) WithTargetVolumeAttributesClassName(value string) *ModifyVolumeStatusApplyConfiguration { + b.TargetVolumeAttributesClassName = &value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *ModifyVolumeStatusApplyConfiguration) WithStatus(value v1.PersistentVolumeClaimModifyVolumeStatus) *ModifyVolumeStatusApplyConfiguration { + b.Status = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go index f324584aba..4db12fadb3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go @@ -26,14 +26,15 @@ import ( // PersistentVolumeClaimSpecApplyConfiguration represents an declarative configuration of the PersistentVolumeClaimSpec type for use // with apply. 
type PersistentVolumeClaimSpecApplyConfiguration struct { - AccessModes []v1.PersistentVolumeAccessMode `json:"accessModes,omitempty"` - Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` - Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"` - VolumeName *string `json:"volumeName,omitempty"` - StorageClassName *string `json:"storageClassName,omitempty"` - VolumeMode *v1.PersistentVolumeMode `json:"volumeMode,omitempty"` - DataSource *TypedLocalObjectReferenceApplyConfiguration `json:"dataSource,omitempty"` - DataSourceRef *TypedObjectReferenceApplyConfiguration `json:"dataSourceRef,omitempty"` + AccessModes []v1.PersistentVolumeAccessMode `json:"accessModes,omitempty"` + Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` + Resources *VolumeResourceRequirementsApplyConfiguration `json:"resources,omitempty"` + VolumeName *string `json:"volumeName,omitempty"` + StorageClassName *string `json:"storageClassName,omitempty"` + VolumeMode *v1.PersistentVolumeMode `json:"volumeMode,omitempty"` + DataSource *TypedLocalObjectReferenceApplyConfiguration `json:"dataSource,omitempty"` + DataSourceRef *TypedObjectReferenceApplyConfiguration `json:"dataSourceRef,omitempty"` + VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty"` } // PersistentVolumeClaimSpecApplyConfiguration constructs an declarative configuration of the PersistentVolumeClaimSpec type for use with @@ -63,7 +64,7 @@ func (b *PersistentVolumeClaimSpecApplyConfiguration) WithSelector(value *metav1 // WithResources sets the Resources field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Resources field is set to the value of the last call. -func (b *PersistentVolumeClaimSpecApplyConfiguration) WithResources(value *ResourceRequirementsApplyConfiguration) *PersistentVolumeClaimSpecApplyConfiguration { +func (b *PersistentVolumeClaimSpecApplyConfiguration) WithResources(value *VolumeResourceRequirementsApplyConfiguration) *PersistentVolumeClaimSpecApplyConfiguration { b.Resources = value return b } @@ -107,3 +108,11 @@ func (b *PersistentVolumeClaimSpecApplyConfiguration) WithDataSourceRef(value *T b.DataSourceRef = value return b } + +// WithVolumeAttributesClassName sets the VolumeAttributesClassName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the VolumeAttributesClassName field is set to the value of the last call. +func (b *PersistentVolumeClaimSpecApplyConfiguration) WithVolumeAttributesClassName(value string) *PersistentVolumeClaimSpecApplyConfiguration { + b.VolumeAttributesClassName = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go index c29b2a9a15..1f6d5ae323 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go @@ -25,12 +25,14 @@ import ( // PersistentVolumeClaimStatusApplyConfiguration represents an declarative configuration of the PersistentVolumeClaimStatus type for use // with apply. 
type PersistentVolumeClaimStatusApplyConfiguration struct { - Phase *v1.PersistentVolumeClaimPhase `json:"phase,omitempty"` - AccessModes []v1.PersistentVolumeAccessMode `json:"accessModes,omitempty"` - Capacity *v1.ResourceList `json:"capacity,omitempty"` - Conditions []PersistentVolumeClaimConditionApplyConfiguration `json:"conditions,omitempty"` - AllocatedResources *v1.ResourceList `json:"allocatedResources,omitempty"` - AllocatedResourceStatuses map[v1.ResourceName]v1.ClaimResourceStatus `json:"allocatedResourceStatuses,omitempty"` + Phase *v1.PersistentVolumeClaimPhase `json:"phase,omitempty"` + AccessModes []v1.PersistentVolumeAccessMode `json:"accessModes,omitempty"` + Capacity *v1.ResourceList `json:"capacity,omitempty"` + Conditions []PersistentVolumeClaimConditionApplyConfiguration `json:"conditions,omitempty"` + AllocatedResources *v1.ResourceList `json:"allocatedResources,omitempty"` + AllocatedResourceStatuses map[v1.ResourceName]v1.ClaimResourceStatus `json:"allocatedResourceStatuses,omitempty"` + CurrentVolumeAttributesClassName *string `json:"currentVolumeAttributesClassName,omitempty"` + ModifyVolumeStatus *ModifyVolumeStatusApplyConfiguration `json:"modifyVolumeStatus,omitempty"` } // PersistentVolumeClaimStatusApplyConfiguration constructs an declarative configuration of the PersistentVolumeClaimStatus type for use with @@ -99,3 +101,19 @@ func (b *PersistentVolumeClaimStatusApplyConfiguration) WithAllocatedResourceSta } return b } + +// WithCurrentVolumeAttributesClassName sets the CurrentVolumeAttributesClassName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CurrentVolumeAttributesClassName field is set to the value of the last call. +func (b *PersistentVolumeClaimStatusApplyConfiguration) WithCurrentVolumeAttributesClassName(value string) *PersistentVolumeClaimStatusApplyConfiguration { + b.CurrentVolumeAttributesClassName = &value + return b +} + +// WithModifyVolumeStatus sets the ModifyVolumeStatus field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ModifyVolumeStatus field is set to the value of the last call. 
+func (b *PersistentVolumeClaimStatusApplyConfiguration) WithModifyVolumeStatus(value *ModifyVolumeStatusApplyConfiguration) *PersistentVolumeClaimStatusApplyConfiguration { + b.ModifyVolumeStatus = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumespec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumespec.go index b3a72b1c3e..8a30dab649 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumespec.go @@ -34,6 +34,7 @@ type PersistentVolumeSpecApplyConfiguration struct { MountOptions []string `json:"mountOptions,omitempty"` VolumeMode *v1.PersistentVolumeMode `json:"volumeMode,omitempty"` NodeAffinity *VolumeNodeAffinityApplyConfiguration `json:"nodeAffinity,omitempty"` + VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty"` } // PersistentVolumeSpecApplyConfiguration constructs an declarative configuration of the PersistentVolumeSpec type for use with @@ -285,3 +286,11 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithNodeAffinity(value *VolumeN b.NodeAffinity = value return b } + +// WithVolumeAttributesClassName sets the VolumeAttributesClassName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the VolumeAttributesClassName field is set to the value of the last call. +func (b *PersistentVolumeSpecApplyConfiguration) WithVolumeAttributesClassName(value string) *PersistentVolumeSpecApplyConfiguration { + b.VolumeAttributesClassName = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go index 7d2492203e..ac1eab3d8c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go @@ -29,6 +29,8 @@ type PodAffinityTermApplyConfiguration struct { Namespaces []string `json:"namespaces,omitempty"` TopologyKey *string `json:"topologyKey,omitempty"` NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` + MatchLabelKeys []string `json:"matchLabelKeys,omitempty"` + MismatchLabelKeys []string `json:"mismatchLabelKeys,omitempty"` } // PodAffinityTermApplyConfiguration constructs an declarative configuration of the PodAffinityTerm type for use with @@ -70,3 +72,23 @@ func (b *PodAffinityTermApplyConfiguration) WithNamespaceSelector(value *v1.Labe b.NamespaceSelector = value return b } + +// WithMatchLabelKeys adds the given value to the MatchLabelKeys field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MatchLabelKeys field. +func (b *PodAffinityTermApplyConfiguration) WithMatchLabelKeys(values ...string) *PodAffinityTermApplyConfiguration { + for i := range values { + b.MatchLabelKeys = append(b.MatchLabelKeys, values[i]) + } + return b +} + +// WithMismatchLabelKeys adds the given value to the MismatchLabelKeys field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
+// If called multiple times, values provided by each call will be appended to the MismatchLabelKeys field. +func (b *PodAffinityTermApplyConfiguration) WithMismatchLabelKeys(values ...string) *PodAffinityTermApplyConfiguration { + for i := range values { + b.MismatchLabelKeys = append(b.MismatchLabelKeys, values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/allowedcsidriver.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/sleepaction.go similarity index 51% rename from vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/allowedcsidriver.go rename to vendor/k8s.io/client-go/applyconfigurations/core/v1/sleepaction.go index 27b49bf153..8b3284536a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/allowedcsidriver.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/sleepaction.go @@ -16,24 +16,24 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1beta1 +package v1 -// AllowedCSIDriverApplyConfiguration represents an declarative configuration of the AllowedCSIDriver type for use +// SleepActionApplyConfiguration represents an declarative configuration of the SleepAction type for use // with apply. -type AllowedCSIDriverApplyConfiguration struct { - Name *string `json:"name,omitempty"` +type SleepActionApplyConfiguration struct { + Seconds *int64 `json:"seconds,omitempty"` } -// AllowedCSIDriverApplyConfiguration constructs an declarative configuration of the AllowedCSIDriver type for use with +// SleepActionApplyConfiguration constructs an declarative configuration of the SleepAction type for use with // apply. -func AllowedCSIDriver() *AllowedCSIDriverApplyConfiguration { - return &AllowedCSIDriverApplyConfiguration{} +func SleepAction() *SleepActionApplyConfiguration { + return &SleepActionApplyConfiguration{} } -// WithName sets the Name field in the declarative configuration to the given value +// WithSeconds sets the Seconds field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *AllowedCSIDriverApplyConfiguration) WithName(value string) *AllowedCSIDriverApplyConfiguration { - b.Name = &value +// If called multiple times, the Seconds field is set to the value of the last call. 
+func (b *SleepActionApplyConfiguration) WithSeconds(value int64) *SleepActionApplyConfiguration { + b.Seconds = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeprojection.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeprojection.go index 8d16ea79eb..a2ef0a9943 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeprojection.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeprojection.go @@ -25,6 +25,7 @@ type VolumeProjectionApplyConfiguration struct { DownwardAPI *DownwardAPIProjectionApplyConfiguration `json:"downwardAPI,omitempty"` ConfigMap *ConfigMapProjectionApplyConfiguration `json:"configMap,omitempty"` ServiceAccountToken *ServiceAccountTokenProjectionApplyConfiguration `json:"serviceAccountToken,omitempty"` + ClusterTrustBundle *ClusterTrustBundleProjectionApplyConfiguration `json:"clusterTrustBundle,omitempty"` } // VolumeProjectionApplyConfiguration constructs an declarative configuration of the VolumeProjection type for use with @@ -64,3 +65,11 @@ func (b *VolumeProjectionApplyConfiguration) WithServiceAccountToken(value *Serv b.ServiceAccountToken = value return b } + +// WithClusterTrustBundle sets the ClusterTrustBundle field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClusterTrustBundle field is set to the value of the last call. +func (b *VolumeProjectionApplyConfiguration) WithClusterTrustBundle(value *ClusterTrustBundleProjectionApplyConfiguration) *VolumeProjectionApplyConfiguration { + b.ClusterTrustBundle = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeresourcerequirements.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeresourcerequirements.go new file mode 100644 index 0000000000..89ad1da8b3 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeresourcerequirements.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// VolumeResourceRequirementsApplyConfiguration represents an declarative configuration of the VolumeResourceRequirements type for use +// with apply. +type VolumeResourceRequirementsApplyConfiguration struct { + Limits *v1.ResourceList `json:"limits,omitempty"` + Requests *v1.ResourceList `json:"requests,omitempty"` +} + +// VolumeResourceRequirementsApplyConfiguration constructs an declarative configuration of the VolumeResourceRequirements type for use with +// apply. 
+func VolumeResourceRequirements() *VolumeResourceRequirementsApplyConfiguration { + return &VolumeResourceRequirementsApplyConfiguration{} +} + +// WithLimits sets the Limits field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Limits field is set to the value of the last call. +func (b *VolumeResourceRequirementsApplyConfiguration) WithLimits(value v1.ResourceList) *VolumeResourceRequirementsApplyConfiguration { + b.Limits = &value + return b +} + +// WithRequests sets the Requests field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Requests field is set to the value of the last call. +func (b *VolumeResourceRequirementsApplyConfiguration) WithRequests(value v1.ResourceList) *VolumeResourceRequirementsApplyConfiguration { + b.Requests = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/exemptprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/exemptprioritylevelconfiguration.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/exemptprioritylevelconfiguration.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/exemptprioritylevelconfiguration.go index 3535d74787..cd21214f5a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/exemptprioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/exemptprioritylevelconfiguration.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 // ExemptPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the ExemptPriorityLevelConfiguration type for use // with apply. diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowdistinguishermethod.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowdistinguishermethod.go similarity index 87% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowdistinguishermethod.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowdistinguishermethod.go index 507f8e9abe..d9c8a79cc8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowdistinguishermethod.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowdistinguishermethod.go @@ -16,16 +16,16 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 import ( - v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + v1 "k8s.io/api/flowcontrol/v1" ) // FlowDistinguisherMethodApplyConfiguration represents an declarative configuration of the FlowDistinguisherMethod type for use // with apply. 
type FlowDistinguisherMethodApplyConfiguration struct { - Type *v1alpha1.FlowDistinguisherMethodType `json:"type,omitempty"` + Type *v1.FlowDistinguisherMethodType `json:"type,omitempty"` } // FlowDistinguisherMethodApplyConfiguration constructs an declarative configuration of the FlowDistinguisherMethod type for use with @@ -37,7 +37,7 @@ func FlowDistinguisherMethod() *FlowDistinguisherMethodApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *FlowDistinguisherMethodApplyConfiguration) WithType(value v1alpha1.FlowDistinguisherMethodType) *FlowDistinguisherMethodApplyConfiguration { +func (b *FlowDistinguisherMethodApplyConfiguration) WithType(value v1.FlowDistinguisherMethodType) *FlowDistinguisherMethodApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschema.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschema.go similarity index 94% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschema.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschema.go index 20251d08bf..8809fafbae 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschema.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschema.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 import ( - flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + apiflowcontrolv1 "k8s.io/api/flowcontrol/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -42,7 +42,7 @@ func FlowSchema(name string) *FlowSchemaApplyConfiguration { b := &FlowSchemaApplyConfiguration{} b.WithName(name) b.WithKind("FlowSchema") - b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1alpha1") + b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1") return b } @@ -57,27 +57,27 @@ func FlowSchema(name string) *FlowSchemaApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractFlowSchema(flowSchema *flowcontrolv1alpha1.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) { +func ExtractFlowSchema(flowSchema *apiflowcontrolv1.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) { return extractFlowSchema(flowSchema, fieldManager, "") } // ExtractFlowSchemaStatus is the same as ExtractFlowSchema except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractFlowSchemaStatus(flowSchema *flowcontrolv1alpha1.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) { +func ExtractFlowSchemaStatus(flowSchema *apiflowcontrolv1.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) { return extractFlowSchema(flowSchema, fieldManager, "status") } -func extractFlowSchema(flowSchema *flowcontrolv1alpha1.FlowSchema, fieldManager string, subresource string) (*FlowSchemaApplyConfiguration, error) { +func extractFlowSchema(flowSchema *apiflowcontrolv1.FlowSchema, fieldManager string, subresource string) (*FlowSchemaApplyConfiguration, error) { b := &FlowSchemaApplyConfiguration{} - err := managedfields.ExtractInto(flowSchema, internal.Parser().Type("io.k8s.api.flowcontrol.v1alpha1.FlowSchema"), fieldManager, b, subresource) + err := managedfields.ExtractInto(flowSchema, internal.Parser().Type("io.k8s.api.flowcontrol.v1.FlowSchema"), fieldManager, b, subresource) if err != nil { return nil, err } b.WithName(flowSchema.Name) b.WithKind("FlowSchema") - b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1alpha1") + b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1") return b, nil } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschemacondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemacondition.go similarity index 81% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschemacondition.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemacondition.go index 31f5dc13ed..808ab09a55 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschemacondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemacondition.go @@ -16,21 +16,21 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 import ( - v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/flowcontrol/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // FlowSchemaConditionApplyConfiguration represents an declarative configuration of the FlowSchemaCondition type for use // with apply. type FlowSchemaConditionApplyConfiguration struct { - Type *v1alpha1.FlowSchemaConditionType `json:"type,omitempty"` - Status *v1alpha1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *v1.FlowSchemaConditionType `json:"type,omitempty"` + Status *v1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } // FlowSchemaConditionApplyConfiguration constructs an declarative configuration of the FlowSchemaCondition type for use with @@ -42,7 +42,7 @@ func FlowSchemaCondition() *FlowSchemaConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *FlowSchemaConditionApplyConfiguration) WithType(value v1alpha1.FlowSchemaConditionType) *FlowSchemaConditionApplyConfiguration { +func (b *FlowSchemaConditionApplyConfiguration) WithType(value v1.FlowSchemaConditionType) *FlowSchemaConditionApplyConfiguration { b.Type = &value return b } @@ -50,7 +50,7 @@ func (b *FlowSchemaConditionApplyConfiguration) WithType(value v1alpha1.FlowSche // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *FlowSchemaConditionApplyConfiguration) WithStatus(value v1alpha1.ConditionStatus) *FlowSchemaConditionApplyConfiguration { +func (b *FlowSchemaConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *FlowSchemaConditionApplyConfiguration { b.Status = &value return b } @@ -58,7 +58,7 @@ func (b *FlowSchemaConditionApplyConfiguration) WithStatus(value v1alpha1.Condit // WithLastTransitionTime sets the LastTransitionTime field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LastTransitionTime field is set to the value of the last call. -func (b *FlowSchemaConditionApplyConfiguration) WithLastTransitionTime(value v1.Time) *FlowSchemaConditionApplyConfiguration { +func (b *FlowSchemaConditionApplyConfiguration) WithLastTransitionTime(value metav1.Time) *FlowSchemaConditionApplyConfiguration { b.LastTransitionTime = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschemaspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemaspec.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschemaspec.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemaspec.go index fd5fc0ae9a..2785f5baf3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschemaspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemaspec.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 // FlowSchemaSpecApplyConfiguration represents an declarative configuration of the FlowSchemaSpec type for use // with apply. diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschemastatus.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemastatus.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschemastatus.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemastatus.go index db2dacf13a..7c61360a53 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschemastatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemastatus.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 // FlowSchemaStatusApplyConfiguration represents an declarative configuration of the FlowSchemaStatus type for use // with apply. 
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/groupsubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/groupsubject.go similarity index 98% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/groupsubject.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/groupsubject.go index 0421f3f599..92a03d8628 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/groupsubject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/groupsubject.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 // GroupSubjectApplyConfiguration represents an declarative configuration of the GroupSubject type for use // with apply. diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/limitedprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitedprioritylevelconfiguration.go similarity index 90% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/limitedprioritylevelconfiguration.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitedprioritylevelconfiguration.go index 10660e81aa..c19f097035 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/limitedprioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitedprioritylevelconfiguration.go @@ -16,12 +16,12 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 // LimitedPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the LimitedPriorityLevelConfiguration type for use // with apply. type LimitedPriorityLevelConfigurationApplyConfiguration struct { - AssuredConcurrencyShares *int32 `json:"assuredConcurrencyShares,omitempty"` + NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"` LimitResponse *LimitResponseApplyConfiguration `json:"limitResponse,omitempty"` LendablePercent *int32 `json:"lendablePercent,omitempty"` BorrowingLimitPercent *int32 `json:"borrowingLimitPercent,omitempty"` @@ -33,11 +33,11 @@ func LimitedPriorityLevelConfiguration() *LimitedPriorityLevelConfigurationApply return &LimitedPriorityLevelConfigurationApplyConfiguration{} } -// WithAssuredConcurrencyShares sets the AssuredConcurrencyShares field in the declarative configuration to the given value +// WithNominalConcurrencyShares sets the NominalConcurrencyShares field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the AssuredConcurrencyShares field is set to the value of the last call. -func (b *LimitedPriorityLevelConfigurationApplyConfiguration) WithAssuredConcurrencyShares(value int32) *LimitedPriorityLevelConfigurationApplyConfiguration { - b.AssuredConcurrencyShares = &value +// If called multiple times, the NominalConcurrencyShares field is set to the value of the last call. 
+func (b *LimitedPriorityLevelConfigurationApplyConfiguration) WithNominalConcurrencyShares(value int32) *LimitedPriorityLevelConfigurationApplyConfiguration { + b.NominalConcurrencyShares = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/limitresponse.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitresponse.go similarity index 88% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/limitresponse.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitresponse.go index 5edaa025cd..03ff6d9103 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/limitresponse.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitresponse.go @@ -16,16 +16,16 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 import ( - v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + v1 "k8s.io/api/flowcontrol/v1" ) // LimitResponseApplyConfiguration represents an declarative configuration of the LimitResponse type for use // with apply. type LimitResponseApplyConfiguration struct { - Type *v1alpha1.LimitResponseType `json:"type,omitempty"` + Type *v1.LimitResponseType `json:"type,omitempty"` Queuing *QueuingConfigurationApplyConfiguration `json:"queuing,omitempty"` } @@ -38,7 +38,7 @@ func LimitResponse() *LimitResponseApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *LimitResponseApplyConfiguration) WithType(value v1alpha1.LimitResponseType) *LimitResponseApplyConfiguration { +func (b *LimitResponseApplyConfiguration) WithType(value v1.LimitResponseType) *LimitResponseApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/nonresourcepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/nonresourcepolicyrule.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/nonresourcepolicyrule.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/nonresourcepolicyrule.go index b1f09f5304..d9f8c2eccf 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/nonresourcepolicyrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/nonresourcepolicyrule.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 // NonResourcePolicyRuleApplyConfiguration represents an declarative configuration of the NonResourcePolicyRule type for use // with apply. 
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/policyruleswithsubjects.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/policyruleswithsubjects.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/policyruleswithsubjects.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/policyruleswithsubjects.go index 8411040644..b193efa8bf 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/policyruleswithsubjects.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/policyruleswithsubjects.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 // PolicyRulesWithSubjectsApplyConfiguration represents an declarative configuration of the PolicyRulesWithSubjects type for use // with apply. diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfiguration.go similarity index 94% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfiguration.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfiguration.go index a40db75dcb..e8a1b97c9f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfiguration.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 import ( - flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + apiflowcontrolv1 "k8s.io/api/flowcontrol/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -42,7 +42,7 @@ func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyCon b := &PriorityLevelConfigurationApplyConfiguration{} b.WithName(name) b.WithKind("PriorityLevelConfiguration") - b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1alpha1") + b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1") return b } @@ -57,27 +57,27 @@ func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyCon // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1alpha1.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) { +func ExtractPriorityLevelConfiguration(priorityLevelConfiguration *apiflowcontrolv1.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) { return extractPriorityLevelConfiguration(priorityLevelConfiguration, fieldManager, "") } // ExtractPriorityLevelConfigurationStatus is the same as ExtractPriorityLevelConfiguration except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractPriorityLevelConfigurationStatus(priorityLevelConfiguration *flowcontrolv1alpha1.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) { +func ExtractPriorityLevelConfigurationStatus(priorityLevelConfiguration *apiflowcontrolv1.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) { return extractPriorityLevelConfiguration(priorityLevelConfiguration, fieldManager, "status") } -func extractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1alpha1.PriorityLevelConfiguration, fieldManager string, subresource string) (*PriorityLevelConfigurationApplyConfiguration, error) { +func extractPriorityLevelConfiguration(priorityLevelConfiguration *apiflowcontrolv1.PriorityLevelConfiguration, fieldManager string, subresource string) (*PriorityLevelConfigurationApplyConfiguration, error) { b := &PriorityLevelConfigurationApplyConfiguration{} - err := managedfields.ExtractInto(priorityLevelConfiguration, internal.Parser().Type("io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfiguration"), fieldManager, b, subresource) + err := managedfields.ExtractInto(priorityLevelConfiguration, internal.Parser().Type("io.k8s.api.flowcontrol.v1.PriorityLevelConfiguration"), fieldManager, b, subresource) if err != nil { return nil, err } b.WithName(priorityLevelConfiguration.Name) b.WithKind("PriorityLevelConfiguration") - b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1alpha1") + b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1") return b, nil } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationcondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationcondition.go similarity index 81% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationcondition.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationcondition.go index bd91b80f21..6ce588c8d9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationcondition.go @@ -16,21 +16,21 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 import ( - v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/flowcontrol/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // PriorityLevelConfigurationConditionApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationCondition type for use // with apply. 
type PriorityLevelConfigurationConditionApplyConfiguration struct { - Type *v1alpha1.PriorityLevelConfigurationConditionType `json:"type,omitempty"` - Status *v1alpha1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *v1.PriorityLevelConfigurationConditionType `json:"type,omitempty"` + Status *v1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } // PriorityLevelConfigurationConditionApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationCondition type for use with @@ -42,7 +42,7 @@ func PriorityLevelConfigurationCondition() *PriorityLevelConfigurationConditionA // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value v1alpha1.PriorityLevelConfigurationConditionType) *PriorityLevelConfigurationConditionApplyConfiguration { +func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value v1.PriorityLevelConfigurationConditionType) *PriorityLevelConfigurationConditionApplyConfiguration { b.Type = &value return b } @@ -50,7 +50,7 @@ func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value v // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithStatus(value v1alpha1.ConditionStatus) *PriorityLevelConfigurationConditionApplyConfiguration { +func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *PriorityLevelConfigurationConditionApplyConfiguration { b.Status = &value return b } @@ -58,7 +58,7 @@ func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithStatus(value // WithLastTransitionTime sets the LastTransitionTime field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LastTransitionTime field is set to the value of the last call. 
-func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithLastTransitionTime(value v1.Time) *PriorityLevelConfigurationConditionApplyConfiguration { +func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithLastTransitionTime(value metav1.Time) *PriorityLevelConfigurationConditionApplyConfiguration { b.LastTransitionTime = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationreference.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationreference.go similarity index 98% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationreference.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationreference.go index b477c04df5..0638aee8b8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationreference.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 // PriorityLevelConfigurationReferenceApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationReference type for use // with apply. diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationspec.go similarity index 92% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationspec.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationspec.go index ade920a755..5d88749593 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationspec.go @@ -16,16 +16,16 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 import ( - v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + v1 "k8s.io/api/flowcontrol/v1" ) // PriorityLevelConfigurationSpecApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationSpec type for use // with apply. type PriorityLevelConfigurationSpecApplyConfiguration struct { - Type *v1alpha1.PriorityLevelEnablement `json:"type,omitempty"` + Type *v1.PriorityLevelEnablement `json:"type,omitempty"` Limited *LimitedPriorityLevelConfigurationApplyConfiguration `json:"limited,omitempty"` Exempt *ExemptPriorityLevelConfigurationApplyConfiguration `json:"exempt,omitempty"` } @@ -39,7 +39,7 @@ func PriorityLevelConfigurationSpec() *PriorityLevelConfigurationSpecApplyConfig // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithType(value v1alpha1.PriorityLevelEnablement) *PriorityLevelConfigurationSpecApplyConfiguration { +func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithType(value v1.PriorityLevelEnablement) *PriorityLevelConfigurationSpecApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationstatus.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationstatus.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationstatus.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationstatus.go index eb3ef3d61d..322871edc6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationstatus.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 // PriorityLevelConfigurationStatusApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationStatus type for use // with apply. diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/queuingconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/queuingconfiguration.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/queuingconfiguration.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/queuingconfiguration.go index 0fccc3f08b..69fd2c23cc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/queuingconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/queuingconfiguration.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 // QueuingConfigurationApplyConfiguration represents an declarative configuration of the QueuingConfiguration type for use // with apply. diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/resourcepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/resourcepolicyrule.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/resourcepolicyrule.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/resourcepolicyrule.go index d2c6f4eed6..0991ce9445 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/resourcepolicyrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/resourcepolicyrule.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 // ResourcePolicyRuleApplyConfiguration represents an declarative configuration of the ResourcePolicyRule type for use // with apply. 
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/serviceaccountsubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/serviceaccountsubject.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/serviceaccountsubject.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/serviceaccountsubject.go index 270b5225e1..55787ca767 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/serviceaccountsubject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/serviceaccountsubject.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 // ServiceAccountSubjectApplyConfiguration represents an declarative configuration of the ServiceAccountSubject type for use // with apply. diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/subject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/subject.go similarity index 92% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/subject.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/subject.go index 83c09d644b..f02b03bdc7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/subject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/subject.go @@ -16,16 +16,16 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 import ( - v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + v1 "k8s.io/api/flowcontrol/v1" ) // SubjectApplyConfiguration represents an declarative configuration of the Subject type for use // with apply. type SubjectApplyConfiguration struct { - Kind *v1alpha1.SubjectKind `json:"kind,omitempty"` + Kind *v1.SubjectKind `json:"kind,omitempty"` User *UserSubjectApplyConfiguration `json:"user,omitempty"` Group *GroupSubjectApplyConfiguration `json:"group,omitempty"` ServiceAccount *ServiceAccountSubjectApplyConfiguration `json:"serviceAccount,omitempty"` @@ -40,7 +40,7 @@ func Subject() *SubjectApplyConfiguration { // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. -func (b *SubjectApplyConfiguration) WithKind(value v1alpha1.SubjectKind) *SubjectApplyConfiguration { +func (b *SubjectApplyConfiguration) WithKind(value v1.SubjectKind) *SubjectApplyConfiguration { b.Kind = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/usersubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/usersubject.go similarity index 98% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/usersubject.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/usersubject.go index a762c249e0..2d17c111c6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/usersubject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/usersubject.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 // UserSubjectApplyConfiguration represents an declarative configuration of the UserSubject type for use // with apply. 
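Taken together, the renamed files above move the flow-control apply configurations onto the promoted group version. A minimal, self-contained sketch (illustrative only; the object name and numbers are placeholders):

package main

import (
	"fmt"

	flowcontrolv1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1"
)

func main() {
	// The constructor now stamps the promoted group version shown in the
	// diffs above (flowcontrol.apiserver.k8s.io/v1) instead of v1alpha1.
	plc := flowcontrolv1.PriorityLevelConfiguration("example").
		WithSpec(flowcontrolv1.PriorityLevelConfigurationSpec().
			WithType("Limited").
			WithLimited(flowcontrolv1.LimitedPriorityLevelConfiguration().
				WithNominalConcurrencyShares(30)))

	fmt.Println(*plc.APIVersion, *plc.Kind)
}

In a real caller this object would typically be handed to the v1 typed client's Apply method with a field manager; that client plumbing is assumed and not shown here.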
diff --git a/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go b/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go index 3ed553662f..2ceb262217 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go +++ b/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go @@ -1013,7 +1013,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -1075,7 +1074,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: data type: namedType: __untyped_atomic_ - default: {} - name: kind type: scalar: string @@ -1114,7 +1112,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -1227,11 +1224,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: lastUpdateTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -1343,7 +1338,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -1455,7 +1449,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -1586,7 +1579,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: data type: namedType: __untyped_atomic_ - default: {} - name: kind type: scalar: string @@ -1625,11 +1617,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: lastUpdateTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -1768,7 +1758,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -1899,7 +1888,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: data type: namedType: __untyped_atomic_ - default: {} - name: kind type: scalar: string @@ -1938,7 +1926,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -2051,11 +2038,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: lastUpdateTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -2167,7 +2152,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -2279,7 +2263,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -2606,7 +2589,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -2860,7 +2842,6 @@ var schemaYAML = 
typed.YAMLObject(`types: - name: currentAverageValue type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - default: {} - name: name type: scalar: string @@ -2904,7 +2885,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: currentValue type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - default: {} - name: metricName type: scalar: string @@ -2939,7 +2919,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -3067,7 +3046,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: targetValue type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - default: {} - name: io.k8s.api.autoscaling.v2beta1.ObjectMetricStatus map: fields: @@ -3077,7 +3055,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: currentValue type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - default: {} - name: metricName type: scalar: string @@ -3102,14 +3079,12 @@ var schemaYAML = typed.YAMLObject(`types: - name: targetAverageValue type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - default: {} - name: io.k8s.api.autoscaling.v2beta1.PodsMetricStatus map: fields: - name: currentAverageValue type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - default: {} - name: metricName type: scalar: string @@ -3139,7 +3114,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: currentAverageValue type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - default: {} - name: name type: scalar: string @@ -3276,7 +3250,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -3591,11 +3564,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastProbeTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -3876,11 +3847,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: lastUpdateTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -4002,11 +3971,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: lastUpdateTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -4404,6 +4371,25 @@ var schemaYAML = typed.YAMLObject(`types: - name: timeoutSeconds type: scalar: numeric +- name: io.k8s.api.core.v1.ClusterTrustBundleProjection + map: + fields: + - name: labelSelector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: name + type: + scalar: string + - name: optional + type: + scalar: boolean + - name: path + type: + scalar: string + default: "" + - name: signerName + type: + scalar: string - name: io.k8s.api.core.v1.ComponentCondition map: fields: @@ -4716,7 +4702,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: startedAt type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: io.k8s.api.core.v1.ContainerStateTerminated map: fields: @@ -4730,7 +4715,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: finishedAt type: namedType: 
io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -4743,7 +4727,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: startedAt type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: io.k8s.api.core.v1.ContainerStateWaiting map: fields: @@ -5099,11 +5082,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: eventTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime - default: {} - name: firstTimestamp type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: involvedObject type: namedType: io.k8s.api.core.v1.ObjectReference @@ -5114,7 +5095,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTimestamp type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -5155,7 +5135,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastObservedTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime - default: {} - name: io.k8s.api.core.v1.EventSource map: fields: @@ -5338,7 +5317,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: port type: namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString - default: {} - name: scheme type: scalar: string @@ -5497,6 +5475,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: httpGet type: namedType: io.k8s.api.core.v1.HTTPGetAction + - name: sleep + type: + namedType: io.k8s.api.core.v1.SleepAction - name: tcpSocket type: namedType: io.k8s.api.core.v1.TCPSocketAction @@ -5567,6 +5548,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: ip type: scalar: string + - name: ipMode + type: + scalar: string - name: ports type: list: @@ -5599,6 +5583,16 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: io.k8s.api.core.v1.ModifyVolumeStatus + map: + fields: + - name: status + type: + scalar: string + default: "" + - name: targetVolumeAttributesClassName + type: + scalar: string - name: io.k8s.api.core.v1.NFSVolumeSource map: fields: @@ -5640,7 +5634,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -5728,11 +5721,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastHeartbeatTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -6036,11 +6027,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastProbeTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -6072,7 +6061,7 @@ var schemaYAML = typed.YAMLObject(`types: namedType: io.k8s.api.core.v1.TypedObjectReference - name: resources type: - namedType: io.k8s.api.core.v1.ResourceRequirements + namedType: io.k8s.api.core.v1.VolumeResourceRequirements default: {} - name: selector type: @@ -6080,6 +6069,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: storageClassName type: scalar: string + - name: volumeAttributesClassName + type: + scalar: string - name: volumeMode type: scalar: string @@ -6119,6 +6111,12 @@ var schemaYAML = typed.YAMLObject(`types: elementRelationship: associative keys: - type + - name: currentVolumeAttributesClassName + type: + scalar: string + - name: modifyVolumeStatus + type: + namedType: 
io.k8s.api.core.v1.ModifyVolumeStatus - name: phase type: scalar: string @@ -6239,6 +6237,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: storageos type: namedType: io.k8s.api.core.v1.StorageOSPersistentVolumeSource + - name: volumeAttributesClassName + type: + scalar: string - name: volumeMode type: scalar: string @@ -6312,6 +6313,18 @@ var schemaYAML = typed.YAMLObject(`types: - name: labelSelector type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: matchLabelKeys + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: mismatchLabelKeys + type: + list: + elementType: + scalar: string + elementRelationship: atomic - name: namespaceSelector type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector @@ -6346,11 +6359,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastProbeTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -6960,7 +6971,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -7036,7 +7046,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: divisor type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - default: {} - name: resource type: scalar: string @@ -7459,7 +7468,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: targetPort type: namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString - default: {} - name: io.k8s.api.core.v1.ServiceSpec map: fields: @@ -7562,6 +7570,13 @@ var schemaYAML = typed.YAMLObject(`types: - name: clientIP type: namedType: io.k8s.api.core.v1.ClientIPConfig +- name: io.k8s.api.core.v1.SleepAction + map: + fields: + - name: seconds + type: + scalar: numeric + default: 0 - name: io.k8s.api.core.v1.StorageOSPersistentVolumeSource map: fields: @@ -7618,7 +7633,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: port type: namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString - default: {} - name: io.k8s.api.core.v1.Taint map: fields: @@ -7879,6 +7893,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: io.k8s.api.core.v1.VolumeProjection map: fields: + - name: clusterTrustBundle + type: + namedType: io.k8s.api.core.v1.ClusterTrustBundleProjection - name: configMap type: namedType: io.k8s.api.core.v1.ConfigMapProjection @@ -7891,6 +7908,19 @@ var schemaYAML = typed.YAMLObject(`types: - name: serviceAccountToken type: namedType: io.k8s.api.core.v1.ServiceAccountTokenProjection +- name: io.k8s.api.core.v1.VolumeResourceRequirements + map: + fields: + - name: limits + type: + map: + elementType: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + - name: requests + type: + map: + elementType: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - name: io.k8s.api.core.v1.VsphereVirtualDiskVolumeSource map: fields: @@ -8156,11 +8186,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: deprecatedFirstTimestamp type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: deprecatedLastTimestamp type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: deprecatedSource type: namedType: io.k8s.api.core.v1.EventSource @@ -8168,7 +8196,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: eventTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime - default: {} - name: kind type: scalar: 
string @@ -8211,7 +8238,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastObservedTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime - default: {} - name: io.k8s.api.events.v1beta1.Event map: fields: @@ -8227,11 +8253,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: deprecatedFirstTimestamp type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: deprecatedLastTimestamp type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: deprecatedSource type: namedType: io.k8s.api.core.v1.EventSource @@ -8239,7 +8263,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: eventTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime - default: {} - name: kind type: scalar: string @@ -8282,7 +8305,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastObservedTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime - default: {} - name: io.k8s.api.extensions.v1beta1.DaemonSet map: fields: @@ -8310,7 +8332,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -8426,11 +8447,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: lastUpdateTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -8586,7 +8605,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: servicePort type: namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString - default: {} - name: io.k8s.api.extensions.v1beta1.IngressLoadBalancerIngress map: fields: @@ -8797,7 +8815,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -8879,7 +8896,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: maxUnavailable type: namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString -- name: io.k8s.api.flowcontrol.v1alpha1.ExemptPriorityLevelConfiguration +- name: io.k8s.api.flowcontrol.v1.ExemptPriorityLevelConfiguration map: fields: - name: lendablePercent @@ -8888,14 +8905,14 @@ var schemaYAML = typed.YAMLObject(`types: - name: nominalConcurrencyShares type: scalar: numeric -- name: io.k8s.api.flowcontrol.v1alpha1.FlowDistinguisherMethod +- name: io.k8s.api.flowcontrol.v1.FlowDistinguisherMethod map: fields: - name: type type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1alpha1.FlowSchema +- name: io.k8s.api.flowcontrol.v1.FlowSchema map: fields: - name: apiVersion @@ -8910,19 +8927,18 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaSpec + namedType: io.k8s.api.flowcontrol.v1.FlowSchemaSpec default: {} - name: status type: - namedType: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaStatus + namedType: io.k8s.api.flowcontrol.v1.FlowSchemaStatus default: {} -- name: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaCondition +- name: io.k8s.api.flowcontrol.v1.FlowSchemaCondition map: fields: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -8935,50 +8951,50 @@ var schemaYAML = typed.YAMLObject(`types: - name: type type: scalar: string -- name: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaSpec +- name: io.k8s.api.flowcontrol.v1.FlowSchemaSpec map: 
fields: - name: distinguisherMethod type: - namedType: io.k8s.api.flowcontrol.v1alpha1.FlowDistinguisherMethod + namedType: io.k8s.api.flowcontrol.v1.FlowDistinguisherMethod - name: matchingPrecedence type: scalar: numeric default: 0 - name: priorityLevelConfiguration type: - namedType: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationReference + namedType: io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationReference default: {} - name: rules type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1alpha1.PolicyRulesWithSubjects + namedType: io.k8s.api.flowcontrol.v1.PolicyRulesWithSubjects elementRelationship: atomic -- name: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaStatus +- name: io.k8s.api.flowcontrol.v1.FlowSchemaStatus map: fields: - name: conditions type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaCondition + namedType: io.k8s.api.flowcontrol.v1.FlowSchemaCondition elementRelationship: associative keys: - type -- name: io.k8s.api.flowcontrol.v1alpha1.GroupSubject +- name: io.k8s.api.flowcontrol.v1.GroupSubject map: fields: - name: name type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1alpha1.LimitResponse +- name: io.k8s.api.flowcontrol.v1.LimitResponse map: fields: - name: queuing type: - namedType: io.k8s.api.flowcontrol.v1alpha1.QueuingConfiguration + namedType: io.k8s.api.flowcontrol.v1.QueuingConfiguration - name: type type: scalar: string @@ -8988,13 +9004,9 @@ var schemaYAML = typed.YAMLObject(`types: fields: - fieldName: queuing discriminatorValue: Queuing -- name: io.k8s.api.flowcontrol.v1alpha1.LimitedPriorityLevelConfiguration +- name: io.k8s.api.flowcontrol.v1.LimitedPriorityLevelConfiguration map: fields: - - name: assuredConcurrencyShares - type: - scalar: numeric - default: 0 - name: borrowingLimitPercent type: scalar: numeric @@ -9003,9 +9015,12 @@ var schemaYAML = typed.YAMLObject(`types: scalar: numeric - name: limitResponse type: - namedType: io.k8s.api.flowcontrol.v1alpha1.LimitResponse + namedType: io.k8s.api.flowcontrol.v1.LimitResponse default: {} -- name: io.k8s.api.flowcontrol.v1alpha1.NonResourcePolicyRule + - name: nominalConcurrencyShares + type: + scalar: numeric +- name: io.k8s.api.flowcontrol.v1.NonResourcePolicyRule map: fields: - name: nonResourceURLs @@ -9020,28 +9035,28 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: associative -- name: io.k8s.api.flowcontrol.v1alpha1.PolicyRulesWithSubjects +- name: io.k8s.api.flowcontrol.v1.PolicyRulesWithSubjects map: fields: - name: nonResourceRules type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1alpha1.NonResourcePolicyRule + namedType: io.k8s.api.flowcontrol.v1.NonResourcePolicyRule elementRelationship: atomic - name: resourceRules type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1alpha1.ResourcePolicyRule + namedType: io.k8s.api.flowcontrol.v1.ResourcePolicyRule elementRelationship: atomic - name: subjects type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1alpha1.Subject + namedType: io.k8s.api.flowcontrol.v1.Subject elementRelationship: atomic -- name: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfiguration +- name: io.k8s.api.flowcontrol.v1.PriorityLevelConfiguration map: fields: - name: apiVersion @@ -9056,19 +9071,18 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationSpec + namedType: io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationSpec default: {} - 
name: status type: - namedType: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationStatus + namedType: io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationStatus default: {} -- name: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationCondition +- name: io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationCondition map: fields: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -9081,22 +9095,22 @@ var schemaYAML = typed.YAMLObject(`types: - name: type type: scalar: string -- name: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationReference +- name: io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationReference map: fields: - name: name type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationSpec +- name: io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationSpec map: fields: - name: exempt type: - namedType: io.k8s.api.flowcontrol.v1alpha1.ExemptPriorityLevelConfiguration + namedType: io.k8s.api.flowcontrol.v1.ExemptPriorityLevelConfiguration - name: limited type: - namedType: io.k8s.api.flowcontrol.v1alpha1.LimitedPriorityLevelConfiguration + namedType: io.k8s.api.flowcontrol.v1.LimitedPriorityLevelConfiguration - name: type type: scalar: string @@ -9108,18 +9122,18 @@ var schemaYAML = typed.YAMLObject(`types: discriminatorValue: Exempt - fieldName: limited discriminatorValue: Limited -- name: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationStatus +- name: io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationStatus map: fields: - name: conditions type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationCondition + namedType: io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationCondition elementRelationship: associative keys: - type -- name: io.k8s.api.flowcontrol.v1alpha1.QueuingConfiguration +- name: io.k8s.api.flowcontrol.v1.QueuingConfiguration map: fields: - name: handSize @@ -9134,7 +9148,7 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: numeric default: 0 -- name: io.k8s.api.flowcontrol.v1alpha1.ResourcePolicyRule +- name: io.k8s.api.flowcontrol.v1.ResourcePolicyRule map: fields: - name: apiGroups @@ -9164,7 +9178,7 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: associative -- name: io.k8s.api.flowcontrol.v1alpha1.ServiceAccountSubject +- name: io.k8s.api.flowcontrol.v1.ServiceAccountSubject map: fields: - name: name @@ -9175,22 +9189,22 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1alpha1.Subject +- name: io.k8s.api.flowcontrol.v1.Subject map: fields: - name: group type: - namedType: io.k8s.api.flowcontrol.v1alpha1.GroupSubject + namedType: io.k8s.api.flowcontrol.v1.GroupSubject - name: kind type: scalar: string default: "" - name: serviceAccount type: - namedType: io.k8s.api.flowcontrol.v1alpha1.ServiceAccountSubject + namedType: io.k8s.api.flowcontrol.v1.ServiceAccountSubject - name: user type: - namedType: io.k8s.api.flowcontrol.v1alpha1.UserSubject + namedType: io.k8s.api.flowcontrol.v1.UserSubject unions: - discriminator: kind fields: @@ -9200,7 +9214,7 @@ var schemaYAML = typed.YAMLObject(`types: discriminatorValue: ServiceAccount - fieldName: user discriminatorValue: User -- name: io.k8s.api.flowcontrol.v1alpha1.UserSubject +- name: io.k8s.api.flowcontrol.v1.UserSubject map: fields: - name: name @@ -9250,7 +9264,6 @@ var schemaYAML = typed.YAMLObject(`types: 
- name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -9396,7 +9409,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -9578,7 +9590,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -9724,7 +9735,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -9906,7 +9916,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -10052,7 +10061,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -10564,7 +10572,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: number type: scalar: numeric -- name: io.k8s.api.networking.v1alpha1.ClusterCIDR +- name: io.k8s.api.networking.v1alpha1.IPAddress map: fields: - name: apiVersion @@ -10579,27 +10587,30 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.networking.v1alpha1.ClusterCIDRSpec + namedType: io.k8s.api.networking.v1alpha1.IPAddressSpec default: {} -- name: io.k8s.api.networking.v1alpha1.ClusterCIDRSpec +- name: io.k8s.api.networking.v1alpha1.IPAddressSpec map: fields: - - name: ipv4 + - name: parentRef + type: + namedType: io.k8s.api.networking.v1alpha1.ParentReference +- name: io.k8s.api.networking.v1alpha1.ParentReference + map: + fields: + - name: group type: scalar: string - default: "" - - name: ipv6 + - name: name type: scalar: string - default: "" - - name: nodeSelector + - name: namespace type: - namedType: io.k8s.api.core.v1.NodeSelector - - name: perNodeHostBits + scalar: string + - name: resource type: - scalar: numeric - default: 0 -- name: io.k8s.api.networking.v1alpha1.IPAddress + scalar: string +- name: io.k8s.api.networking.v1alpha1.ServiceCIDR map: fields: - name: apiVersion @@ -10614,32 +10625,32 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.networking.v1alpha1.IPAddressSpec + namedType: io.k8s.api.networking.v1alpha1.ServiceCIDRSpec default: {} -- name: io.k8s.api.networking.v1alpha1.IPAddressSpec + - name: status + type: + namedType: io.k8s.api.networking.v1alpha1.ServiceCIDRStatus + default: {} +- name: io.k8s.api.networking.v1alpha1.ServiceCIDRSpec map: fields: - - name: parentRef + - name: cidrs type: - namedType: io.k8s.api.networking.v1alpha1.ParentReference -- name: io.k8s.api.networking.v1alpha1.ParentReference + list: + elementType: + scalar: string + elementRelationship: atomic +- name: io.k8s.api.networking.v1alpha1.ServiceCIDRStatus map: fields: - - name: group - type: - scalar: string - - name: name - type: - scalar: string - - name: namespace - type: - scalar: string - - name: resource - type: - scalar: string - - name: uid + - name: conditions type: - scalar: string + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + elementRelationship: associative + keys: + - type - name: io.k8s.api.networking.v1beta1.HTTPIngressPath map: fields: @@ 
-10695,7 +10706,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: servicePort type: namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString - default: {} - name: io.k8s.api.networking.v1beta1.IngressClass map: fields: @@ -11061,29 +11071,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: observedGeneration type: scalar: numeric -- name: io.k8s.api.policy.v1beta1.AllowedCSIDriver - map: - fields: - - name: name - type: - scalar: string - default: "" -- name: io.k8s.api.policy.v1beta1.AllowedFlexVolume - map: - fields: - - name: driver - type: - scalar: string - default: "" -- name: io.k8s.api.policy.v1beta1.AllowedHostPath - map: - fields: - - name: pathPrefix - type: - scalar: string - - name: readOnly - type: - scalar: boolean - name: io.k8s.api.policy.v1beta1.Eviction map: fields: @@ -11100,40 +11087,6 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta default: {} -- name: io.k8s.api.policy.v1beta1.FSGroupStrategyOptions - map: - fields: - - name: ranges - type: - list: - elementType: - namedType: io.k8s.api.policy.v1beta1.IDRange - elementRelationship: atomic - - name: rule - type: - scalar: string -- name: io.k8s.api.policy.v1beta1.HostPortRange - map: - fields: - - name: max - type: - scalar: numeric - default: 0 - - name: min - type: - scalar: numeric - default: 0 -- name: io.k8s.api.policy.v1beta1.IDRange - map: - fields: - - name: max - type: - scalar: numeric - default: 0 - - name: min - type: - scalar: numeric - default: 0 - name: io.k8s.api.policy.v1beta1.PodDisruptionBudget map: fields: @@ -11205,195 +11158,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: observedGeneration type: scalar: numeric -- name: io.k8s.api.policy.v1beta1.PodSecurityPolicy - map: - fields: - - name: apiVersion - type: - scalar: string - - name: kind - type: - scalar: string - - name: metadata - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta - default: {} - - name: spec - type: - namedType: io.k8s.api.policy.v1beta1.PodSecurityPolicySpec - default: {} -- name: io.k8s.api.policy.v1beta1.PodSecurityPolicySpec - map: - fields: - - name: allowPrivilegeEscalation - type: - scalar: boolean - - name: allowedCSIDrivers - type: - list: - elementType: - namedType: io.k8s.api.policy.v1beta1.AllowedCSIDriver - elementRelationship: atomic - - name: allowedCapabilities - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: allowedFlexVolumes - type: - list: - elementType: - namedType: io.k8s.api.policy.v1beta1.AllowedFlexVolume - elementRelationship: atomic - - name: allowedHostPaths - type: - list: - elementType: - namedType: io.k8s.api.policy.v1beta1.AllowedHostPath - elementRelationship: atomic - - name: allowedProcMountTypes - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: allowedUnsafeSysctls - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: defaultAddCapabilities - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: defaultAllowPrivilegeEscalation - type: - scalar: boolean - - name: forbiddenSysctls - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: fsGroup - type: - namedType: io.k8s.api.policy.v1beta1.FSGroupStrategyOptions - default: {} - - name: hostIPC - type: - scalar: boolean - - name: hostNetwork - type: - scalar: boolean - - name: hostPID - type: - scalar: boolean - - name: hostPorts - type: - list: - elementType: - namedType: 
io.k8s.api.policy.v1beta1.HostPortRange - elementRelationship: atomic - - name: privileged - type: - scalar: boolean - - name: readOnlyRootFilesystem - type: - scalar: boolean - - name: requiredDropCapabilities - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: runAsGroup - type: - namedType: io.k8s.api.policy.v1beta1.RunAsGroupStrategyOptions - - name: runAsUser - type: - namedType: io.k8s.api.policy.v1beta1.RunAsUserStrategyOptions - default: {} - - name: runtimeClass - type: - namedType: io.k8s.api.policy.v1beta1.RuntimeClassStrategyOptions - - name: seLinux - type: - namedType: io.k8s.api.policy.v1beta1.SELinuxStrategyOptions - default: {} - - name: supplementalGroups - type: - namedType: io.k8s.api.policy.v1beta1.SupplementalGroupsStrategyOptions - default: {} - - name: volumes - type: - list: - elementType: - scalar: string - elementRelationship: atomic -- name: io.k8s.api.policy.v1beta1.RunAsGroupStrategyOptions - map: - fields: - - name: ranges - type: - list: - elementType: - namedType: io.k8s.api.policy.v1beta1.IDRange - elementRelationship: atomic - - name: rule - type: - scalar: string - default: "" -- name: io.k8s.api.policy.v1beta1.RunAsUserStrategyOptions - map: - fields: - - name: ranges - type: - list: - elementType: - namedType: io.k8s.api.policy.v1beta1.IDRange - elementRelationship: atomic - - name: rule - type: - scalar: string - default: "" -- name: io.k8s.api.policy.v1beta1.RuntimeClassStrategyOptions - map: - fields: - - name: allowedRuntimeClassNames - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: defaultRuntimeClassName - type: - scalar: string -- name: io.k8s.api.policy.v1beta1.SELinuxStrategyOptions - map: - fields: - - name: rule - type: - scalar: string - default: "" - - name: seLinuxOptions - type: - namedType: io.k8s.api.core.v1.SELinuxOptions -- name: io.k8s.api.policy.v1beta1.SupplementalGroupsStrategyOptions - map: - fields: - - name: ranges - type: - list: - elementType: - namedType: io.k8s.api.policy.v1beta1.IDRange - elementRelationship: atomic - - name: rule - type: - scalar: string - name: io.k8s.api.rbac.v1.AggregationRule map: fields: @@ -11923,7 +11687,7 @@ var schemaYAML = typed.YAMLObject(`types: list: elementType: scalar: string - elementRelationship: associative + elementRelationship: atomic - name: selectedNode type: scalar: string @@ -12002,7 +11766,7 @@ var schemaYAML = typed.YAMLObject(`types: list: elementType: scalar: string - elementRelationship: associative + elementRelationship: atomic - name: io.k8s.api.resource.v1alpha2.ResourceClaimSpec map: fields: @@ -12440,7 +12204,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: time type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: io.k8s.api.storage.v1.VolumeNodeResources map: fields: @@ -12536,6 +12299,28 @@ var schemaYAML = typed.YAMLObject(`types: - name: detachError type: namedType: io.k8s.api.storage.v1alpha1.VolumeError +- name: io.k8s.api.storage.v1alpha1.VolumeAttributesClass + map: + fields: + - name: apiVersion + type: + scalar: string + - name: driverName + type: + scalar: string + default: "" + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: parameters + type: + map: + elementType: + scalar: string - name: io.k8s.api.storage.v1alpha1.VolumeError map: fields: @@ -12545,7 +12330,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: time type: namedType: 
io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: io.k8s.api.storage.v1beta1.CSIDriver map: fields: @@ -12795,7 +12579,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: time type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: io.k8s.api.storage.v1beta1.VolumeNodeResources map: fields: @@ -12810,7 +12593,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -12938,7 +12720,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: creationTimestamp type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: deletionGracePeriodSeconds type: scalar: numeric diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/unstructured.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/unstructured.go index 8a58d9e870..a206bd326a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/unstructured.go +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/unstructured.go @@ -125,7 +125,7 @@ func (e *extractor) extractUnstructured(object *unstructured.Unstructured, field return nil, fmt.Errorf("failed to fetch the objectType: %v", err) } result := &unstructured.Unstructured{} - err = managedfields.ExtractInto(object, *objectType, fieldManager, result, subresource) + err = managedfields.ExtractInto(object, *objectType, fieldManager, result, subresource) //nolint:forbidigo if err != nil { return nil, fmt.Errorf("failed calling ExtractInto for unstructured: %v", err) } diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidrspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidrspec.go deleted file mode 100644 index 8d5fa406b0..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidrspec.go +++ /dev/null @@ -1,70 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1 "k8s.io/client-go/applyconfigurations/core/v1" -) - -// ClusterCIDRSpecApplyConfiguration represents an declarative configuration of the ClusterCIDRSpec type for use -// with apply. -type ClusterCIDRSpecApplyConfiguration struct { - NodeSelector *v1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"` - PerNodeHostBits *int32 `json:"perNodeHostBits,omitempty"` - IPv4 *string `json:"ipv4,omitempty"` - IPv6 *string `json:"ipv6,omitempty"` -} - -// ClusterCIDRSpecApplyConfiguration constructs an declarative configuration of the ClusterCIDRSpec type for use with -// apply. -func ClusterCIDRSpec() *ClusterCIDRSpecApplyConfiguration { - return &ClusterCIDRSpecApplyConfiguration{} -} - -// WithNodeSelector sets the NodeSelector field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
-// If called multiple times, the NodeSelector field is set to the value of the last call. -func (b *ClusterCIDRSpecApplyConfiguration) WithNodeSelector(value *v1.NodeSelectorApplyConfiguration) *ClusterCIDRSpecApplyConfiguration { - b.NodeSelector = value - return b -} - -// WithPerNodeHostBits sets the PerNodeHostBits field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the PerNodeHostBits field is set to the value of the last call. -func (b *ClusterCIDRSpecApplyConfiguration) WithPerNodeHostBits(value int32) *ClusterCIDRSpecApplyConfiguration { - b.PerNodeHostBits = &value - return b -} - -// WithIPv4 sets the IPv4 field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the IPv4 field is set to the value of the last call. -func (b *ClusterCIDRSpecApplyConfiguration) WithIPv4(value string) *ClusterCIDRSpecApplyConfiguration { - b.IPv4 = &value - return b -} - -// WithIPv6 sets the IPv6 field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the IPv6 field is set to the value of the last call. -func (b *ClusterCIDRSpecApplyConfiguration) WithIPv6(value string) *ClusterCIDRSpecApplyConfiguration { - b.IPv6 = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go index 14b10b19ff..ce1049709a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go @@ -18,18 +18,13 @@ limitations under the License. package v1alpha1 -import ( - types "k8s.io/apimachinery/pkg/types" -) - // ParentReferenceApplyConfiguration represents an declarative configuration of the ParentReference type for use // with apply. type ParentReferenceApplyConfiguration struct { - Group *string `json:"group,omitempty"` - Resource *string `json:"resource,omitempty"` - Namespace *string `json:"namespace,omitempty"` - Name *string `json:"name,omitempty"` - UID *types.UID `json:"uid,omitempty"` + Group *string `json:"group,omitempty"` + Resource *string `json:"resource,omitempty"` + Namespace *string `json:"namespace,omitempty"` + Name *string `json:"name,omitempty"` } // ParentReferenceApplyConfiguration constructs an declarative configuration of the ParentReference type for use with @@ -69,11 +64,3 @@ func (b *ParentReferenceApplyConfiguration) WithName(value string) *ParentRefere b.Name = &value return b } - -// WithUID sets the UID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the UID field is set to the value of the last call. 
-func (b *ParentReferenceApplyConfiguration) WithUID(value types.UID) *ParentReferenceApplyConfiguration { - b.UID = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidr.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidr.go similarity index 68% rename from vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidr.go rename to vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidr.go index ad0eae9198..f6d0a91e00 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidr.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidr.go @@ -27,55 +27,56 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ClusterCIDRApplyConfiguration represents an declarative configuration of the ClusterCIDR type for use +// ServiceCIDRApplyConfiguration represents an declarative configuration of the ServiceCIDR type for use // with apply. -type ClusterCIDRApplyConfiguration struct { +type ServiceCIDRApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *ClusterCIDRSpecApplyConfiguration `json:"spec,omitempty"` + Spec *ServiceCIDRSpecApplyConfiguration `json:"spec,omitempty"` + Status *ServiceCIDRStatusApplyConfiguration `json:"status,omitempty"` } -// ClusterCIDR constructs an declarative configuration of the ClusterCIDR type for use with +// ServiceCIDR constructs an declarative configuration of the ServiceCIDR type for use with // apply. -func ClusterCIDR(name string) *ClusterCIDRApplyConfiguration { - b := &ClusterCIDRApplyConfiguration{} +func ServiceCIDR(name string) *ServiceCIDRApplyConfiguration { + b := &ServiceCIDRApplyConfiguration{} b.WithName(name) - b.WithKind("ClusterCIDR") + b.WithKind("ServiceCIDR") b.WithAPIVersion("networking.k8s.io/v1alpha1") return b } -// ExtractClusterCIDR extracts the applied configuration owned by fieldManager from -// clusterCIDR. If no managedFields are found in clusterCIDR for fieldManager, a -// ClusterCIDRApplyConfiguration is returned with only the Name, Namespace (if applicable), +// ExtractServiceCIDR extracts the applied configuration owned by fieldManager from +// serviceCIDR. If no managedFields are found in serviceCIDR for fieldManager, a +// ServiceCIDRApplyConfiguration is returned with only the Name, Namespace (if applicable), // APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. -// clusterCIDR must be a unmodified ClusterCIDR API object that was retrieved from the Kubernetes API. -// ExtractClusterCIDR provides a way to perform a extract/modify-in-place/apply workflow. +// serviceCIDR must be a unmodified ServiceCIDR API object that was retrieved from the Kubernetes API. +// ExtractServiceCIDR provides a way to perform a extract/modify-in-place/apply workflow. // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! 
-func ExtractClusterCIDR(clusterCIDR *networkingv1alpha1.ClusterCIDR, fieldManager string) (*ClusterCIDRApplyConfiguration, error) { - return extractClusterCIDR(clusterCIDR, fieldManager, "") +func ExtractServiceCIDR(serviceCIDR *networkingv1alpha1.ServiceCIDR, fieldManager string) (*ServiceCIDRApplyConfiguration, error) { + return extractServiceCIDR(serviceCIDR, fieldManager, "") } -// ExtractClusterCIDRStatus is the same as ExtractClusterCIDR except +// ExtractServiceCIDRStatus is the same as ExtractServiceCIDR except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractClusterCIDRStatus(clusterCIDR *networkingv1alpha1.ClusterCIDR, fieldManager string) (*ClusterCIDRApplyConfiguration, error) { - return extractClusterCIDR(clusterCIDR, fieldManager, "status") +func ExtractServiceCIDRStatus(serviceCIDR *networkingv1alpha1.ServiceCIDR, fieldManager string) (*ServiceCIDRApplyConfiguration, error) { + return extractServiceCIDR(serviceCIDR, fieldManager, "status") } -func extractClusterCIDR(clusterCIDR *networkingv1alpha1.ClusterCIDR, fieldManager string, subresource string) (*ClusterCIDRApplyConfiguration, error) { - b := &ClusterCIDRApplyConfiguration{} - err := managedfields.ExtractInto(clusterCIDR, internal.Parser().Type("io.k8s.api.networking.v1alpha1.ClusterCIDR"), fieldManager, b, subresource) +func extractServiceCIDR(serviceCIDR *networkingv1alpha1.ServiceCIDR, fieldManager string, subresource string) (*ServiceCIDRApplyConfiguration, error) { + b := &ServiceCIDRApplyConfiguration{} + err := managedfields.ExtractInto(serviceCIDR, internal.Parser().Type("io.k8s.api.networking.v1alpha1.ServiceCIDR"), fieldManager, b, subresource) if err != nil { return nil, err } - b.WithName(clusterCIDR.Name) + b.WithName(serviceCIDR.Name) - b.WithKind("ClusterCIDR") + b.WithKind("ServiceCIDR") b.WithAPIVersion("networking.k8s.io/v1alpha1") return b, nil } @@ -83,7 +84,7 @@ func extractClusterCIDR(clusterCIDR *networkingv1alpha1.ClusterCIDR, fieldManage // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. -func (b *ClusterCIDRApplyConfiguration) WithKind(value string) *ClusterCIDRApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithKind(value string) *ServiceCIDRApplyConfiguration { b.Kind = &value return b } @@ -91,7 +92,7 @@ func (b *ClusterCIDRApplyConfiguration) WithKind(value string) *ClusterCIDRApply // WithAPIVersion sets the APIVersion field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. -func (b *ClusterCIDRApplyConfiguration) WithAPIVersion(value string) *ClusterCIDRApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithAPIVersion(value string) *ServiceCIDRApplyConfiguration { b.APIVersion = &value return b } @@ -99,7 +100,7 @@ func (b *ClusterCIDRApplyConfiguration) WithAPIVersion(value string) *ClusterCID // WithName sets the Name field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. 
-func (b *ClusterCIDRApplyConfiguration) WithName(value string) *ClusterCIDRApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithName(value string) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Name = &value return b @@ -108,7 +109,7 @@ func (b *ClusterCIDRApplyConfiguration) WithName(value string) *ClusterCIDRApply // WithGenerateName sets the GenerateName field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the GenerateName field is set to the value of the last call. -func (b *ClusterCIDRApplyConfiguration) WithGenerateName(value string) *ClusterCIDRApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithGenerateName(value string) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.GenerateName = &value return b @@ -117,7 +118,7 @@ func (b *ClusterCIDRApplyConfiguration) WithGenerateName(value string) *ClusterC // WithNamespace sets the Namespace field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Namespace field is set to the value of the last call. -func (b *ClusterCIDRApplyConfiguration) WithNamespace(value string) *ClusterCIDRApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithNamespace(value string) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Namespace = &value return b @@ -126,7 +127,7 @@ func (b *ClusterCIDRApplyConfiguration) WithNamespace(value string) *ClusterCIDR // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. -func (b *ClusterCIDRApplyConfiguration) WithUID(value types.UID) *ClusterCIDRApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithUID(value types.UID) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.UID = &value return b @@ -135,7 +136,7 @@ func (b *ClusterCIDRApplyConfiguration) WithUID(value types.UID) *ClusterCIDRApp // WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ResourceVersion field is set to the value of the last call. -func (b *ClusterCIDRApplyConfiguration) WithResourceVersion(value string) *ClusterCIDRApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithResourceVersion(value string) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.ResourceVersion = &value return b @@ -144,7 +145,7 @@ func (b *ClusterCIDRApplyConfiguration) WithResourceVersion(value string) *Clust // WithGeneration sets the Generation field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Generation field is set to the value of the last call. 
-func (b *ClusterCIDRApplyConfiguration) WithGeneration(value int64) *ClusterCIDRApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithGeneration(value int64) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Generation = &value return b @@ -153,7 +154,7 @@ func (b *ClusterCIDRApplyConfiguration) WithGeneration(value int64) *ClusterCIDR // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ClusterCIDRApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterCIDRApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.CreationTimestamp = &value return b @@ -162,7 +163,7 @@ func (b *ClusterCIDRApplyConfiguration) WithCreationTimestamp(value metav1.Time) // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ClusterCIDRApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterCIDRApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.DeletionTimestamp = &value return b @@ -171,7 +172,7 @@ func (b *ClusterCIDRApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. -func (b *ClusterCIDRApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterCIDRApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.DeletionGracePeriodSeconds = &value return b @@ -181,7 +182,7 @@ func (b *ClusterCIDRApplyConfiguration) WithDeletionGracePeriodSeconds(value int // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, the entries provided by each call will be put on the Labels field, // overwriting an existing map entries in Labels field with the same key. -func (b *ClusterCIDRApplyConfiguration) WithLabels(entries map[string]string) *ClusterCIDRApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithLabels(entries map[string]string) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() if b.Labels == nil && len(entries) > 0 { b.Labels = make(map[string]string, len(entries)) @@ -196,7 +197,7 @@ func (b *ClusterCIDRApplyConfiguration) WithLabels(entries map[string]string) *C // and returns the receiver, so that objects can be build by chaining "With" function invocations. 
// If called multiple times, the entries provided by each call will be put on the Annotations field, // overwriting an existing map entries in Annotations field with the same key. -func (b *ClusterCIDRApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterCIDRApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithAnnotations(entries map[string]string) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() if b.Annotations == nil && len(entries) > 0 { b.Annotations = make(map[string]string, len(entries)) @@ -210,7 +211,7 @@ func (b *ClusterCIDRApplyConfiguration) WithAnnotations(entries map[string]strin // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *ClusterCIDRApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ClusterCIDRApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { @@ -224,7 +225,7 @@ func (b *ClusterCIDRApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR // WithFinalizers adds the given value to the Finalizers field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Finalizers field. -func (b *ClusterCIDRApplyConfiguration) WithFinalizers(values ...string) *ClusterCIDRApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithFinalizers(values ...string) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { b.Finalizers = append(b.Finalizers, values[i]) @@ -232,7 +233,7 @@ func (b *ClusterCIDRApplyConfiguration) WithFinalizers(values ...string) *Cluste return b } -func (b *ClusterCIDRApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { +func (b *ServiceCIDRApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} } @@ -241,7 +242,15 @@ func (b *ClusterCIDRApplyConfiguration) ensureObjectMetaApplyConfigurationExists // WithSpec sets the Spec field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Spec field is set to the value of the last call. -func (b *ClusterCIDRApplyConfiguration) WithSpec(value *ClusterCIDRSpecApplyConfiguration) *ClusterCIDRApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithSpec(value *ServiceCIDRSpecApplyConfiguration) *ServiceCIDRApplyConfiguration { b.Spec = value return b } + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. 
+func (b *ServiceCIDRApplyConfiguration) WithStatus(value *ServiceCIDRStatusApplyConfiguration) *ServiceCIDRApplyConfiguration { + b.Status = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrspec.go new file mode 100644 index 0000000000..302d69194c --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrspec.go @@ -0,0 +1,41 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// ServiceCIDRSpecApplyConfiguration represents an declarative configuration of the ServiceCIDRSpec type for use +// with apply. +type ServiceCIDRSpecApplyConfiguration struct { + CIDRs []string `json:"cidrs,omitempty"` +} + +// ServiceCIDRSpecApplyConfiguration constructs an declarative configuration of the ServiceCIDRSpec type for use with +// apply. +func ServiceCIDRSpec() *ServiceCIDRSpecApplyConfiguration { + return &ServiceCIDRSpecApplyConfiguration{} +} + +// WithCIDRs adds the given value to the CIDRs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the CIDRs field. +func (b *ServiceCIDRSpecApplyConfiguration) WithCIDRs(values ...string) *ServiceCIDRSpecApplyConfiguration { + for i := range values { + b.CIDRs = append(b.CIDRs, values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrstatus.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrstatus.go new file mode 100644 index 0000000000..5afc549a65 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrstatus.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ServiceCIDRStatusApplyConfiguration represents an declarative configuration of the ServiceCIDRStatus type for use +// with apply. 
+type ServiceCIDRStatusApplyConfiguration struct { + Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` +} + +// ServiceCIDRStatusApplyConfiguration constructs an declarative configuration of the ServiceCIDRStatus type for use with +// apply. +func ServiceCIDRStatus() *ServiceCIDRStatusApplyConfiguration { + return &ServiceCIDRStatusApplyConfiguration{} +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *ServiceCIDRStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *ServiceCIDRStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/allowedflexvolume.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/allowedflexvolume.go deleted file mode 100644 index 30c3724cfe..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/allowedflexvolume.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// AllowedFlexVolumeApplyConfiguration represents an declarative configuration of the AllowedFlexVolume type for use -// with apply. -type AllowedFlexVolumeApplyConfiguration struct { - Driver *string `json:"driver,omitempty"` -} - -// AllowedFlexVolumeApplyConfiguration constructs an declarative configuration of the AllowedFlexVolume type for use with -// apply. -func AllowedFlexVolume() *AllowedFlexVolumeApplyConfiguration { - return &AllowedFlexVolumeApplyConfiguration{} -} - -// WithDriver sets the Driver field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Driver field is set to the value of the last call. -func (b *AllowedFlexVolumeApplyConfiguration) WithDriver(value string) *AllowedFlexVolumeApplyConfiguration { - b.Driver = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/allowedhostpath.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/allowedhostpath.go deleted file mode 100644 index 493815d8d4..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/allowedhostpath.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// AllowedHostPathApplyConfiguration represents an declarative configuration of the AllowedHostPath type for use -// with apply. -type AllowedHostPathApplyConfiguration struct { - PathPrefix *string `json:"pathPrefix,omitempty"` - ReadOnly *bool `json:"readOnly,omitempty"` -} - -// AllowedHostPathApplyConfiguration constructs an declarative configuration of the AllowedHostPath type for use with -// apply. -func AllowedHostPath() *AllowedHostPathApplyConfiguration { - return &AllowedHostPathApplyConfiguration{} -} - -// WithPathPrefix sets the PathPrefix field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the PathPrefix field is set to the value of the last call. -func (b *AllowedHostPathApplyConfiguration) WithPathPrefix(value string) *AllowedHostPathApplyConfiguration { - b.PathPrefix = &value - return b -} - -// WithReadOnly sets the ReadOnly field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ReadOnly field is set to the value of the last call. -func (b *AllowedHostPathApplyConfiguration) WithReadOnly(value bool) *AllowedHostPathApplyConfiguration { - b.ReadOnly = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/fsgroupstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/fsgroupstrategyoptions.go deleted file mode 100644 index 06803b439d..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/fsgroupstrategyoptions.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/policy/v1beta1" -) - -// FSGroupStrategyOptionsApplyConfiguration represents an declarative configuration of the FSGroupStrategyOptions type for use -// with apply. -type FSGroupStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.FSGroupStrategyType `json:"rule,omitempty"` - Ranges []IDRangeApplyConfiguration `json:"ranges,omitempty"` -} - -// FSGroupStrategyOptionsApplyConfiguration constructs an declarative configuration of the FSGroupStrategyOptions type for use with -// apply. 
-func FSGroupStrategyOptions() *FSGroupStrategyOptionsApplyConfiguration { - return &FSGroupStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. -func (b *FSGroupStrategyOptionsApplyConfiguration) WithRule(value v1beta1.FSGroupStrategyType) *FSGroupStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithRanges adds the given value to the Ranges field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Ranges field. -func (b *FSGroupStrategyOptionsApplyConfiguration) WithRanges(values ...*IDRangeApplyConfiguration) *FSGroupStrategyOptionsApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithRanges") - } - b.Ranges = append(b.Ranges, *values[i]) - } - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/hostportrange.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/hostportrange.go deleted file mode 100644 index 7c79688139..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/hostportrange.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// HostPortRangeApplyConfiguration represents an declarative configuration of the HostPortRange type for use -// with apply. -type HostPortRangeApplyConfiguration struct { - Min *int32 `json:"min,omitempty"` - Max *int32 `json:"max,omitempty"` -} - -// HostPortRangeApplyConfiguration constructs an declarative configuration of the HostPortRange type for use with -// apply. -func HostPortRange() *HostPortRangeApplyConfiguration { - return &HostPortRangeApplyConfiguration{} -} - -// WithMin sets the Min field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Min field is set to the value of the last call. -func (b *HostPortRangeApplyConfiguration) WithMin(value int32) *HostPortRangeApplyConfiguration { - b.Min = &value - return b -} - -// WithMax sets the Max field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Max field is set to the value of the last call. 
-func (b *HostPortRangeApplyConfiguration) WithMax(value int32) *HostPortRangeApplyConfiguration { - b.Max = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/idrange.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/idrange.go deleted file mode 100644 index af46f76581..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/idrange.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// IDRangeApplyConfiguration represents an declarative configuration of the IDRange type for use -// with apply. -type IDRangeApplyConfiguration struct { - Min *int64 `json:"min,omitempty"` - Max *int64 `json:"max,omitempty"` -} - -// IDRangeApplyConfiguration constructs an declarative configuration of the IDRange type for use with -// apply. -func IDRange() *IDRangeApplyConfiguration { - return &IDRangeApplyConfiguration{} -} - -// WithMin sets the Min field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Min field is set to the value of the last call. -func (b *IDRangeApplyConfiguration) WithMin(value int64) *IDRangeApplyConfiguration { - b.Min = &value - return b -} - -// WithMax sets the Max field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Max field is set to the value of the last call. -func (b *IDRangeApplyConfiguration) WithMax(value int64) *IDRangeApplyConfiguration { - b.Max = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/podsecuritypolicyspec.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/podsecuritypolicyspec.go deleted file mode 100644 index bf951cf56b..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/podsecuritypolicyspec.go +++ /dev/null @@ -1,285 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1 "k8s.io/api/core/v1" - v1beta1 "k8s.io/api/policy/v1beta1" -) - -// PodSecurityPolicySpecApplyConfiguration represents an declarative configuration of the PodSecurityPolicySpec type for use -// with apply. 
-type PodSecurityPolicySpecApplyConfiguration struct { - Privileged *bool `json:"privileged,omitempty"` - DefaultAddCapabilities []v1.Capability `json:"defaultAddCapabilities,omitempty"` - RequiredDropCapabilities []v1.Capability `json:"requiredDropCapabilities,omitempty"` - AllowedCapabilities []v1.Capability `json:"allowedCapabilities,omitempty"` - Volumes []v1beta1.FSType `json:"volumes,omitempty"` - HostNetwork *bool `json:"hostNetwork,omitempty"` - HostPorts []HostPortRangeApplyConfiguration `json:"hostPorts,omitempty"` - HostPID *bool `json:"hostPID,omitempty"` - HostIPC *bool `json:"hostIPC,omitempty"` - SELinux *SELinuxStrategyOptionsApplyConfiguration `json:"seLinux,omitempty"` - RunAsUser *RunAsUserStrategyOptionsApplyConfiguration `json:"runAsUser,omitempty"` - RunAsGroup *RunAsGroupStrategyOptionsApplyConfiguration `json:"runAsGroup,omitempty"` - SupplementalGroups *SupplementalGroupsStrategyOptionsApplyConfiguration `json:"supplementalGroups,omitempty"` - FSGroup *FSGroupStrategyOptionsApplyConfiguration `json:"fsGroup,omitempty"` - ReadOnlyRootFilesystem *bool `json:"readOnlyRootFilesystem,omitempty"` - DefaultAllowPrivilegeEscalation *bool `json:"defaultAllowPrivilegeEscalation,omitempty"` - AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty"` - AllowedHostPaths []AllowedHostPathApplyConfiguration `json:"allowedHostPaths,omitempty"` - AllowedFlexVolumes []AllowedFlexVolumeApplyConfiguration `json:"allowedFlexVolumes,omitempty"` - AllowedCSIDrivers []AllowedCSIDriverApplyConfiguration `json:"allowedCSIDrivers,omitempty"` - AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty"` - ForbiddenSysctls []string `json:"forbiddenSysctls,omitempty"` - AllowedProcMountTypes []v1.ProcMountType `json:"allowedProcMountTypes,omitempty"` - RuntimeClass *RuntimeClassStrategyOptionsApplyConfiguration `json:"runtimeClass,omitempty"` -} - -// PodSecurityPolicySpecApplyConfiguration constructs an declarative configuration of the PodSecurityPolicySpec type for use with -// apply. -func PodSecurityPolicySpec() *PodSecurityPolicySpecApplyConfiguration { - return &PodSecurityPolicySpecApplyConfiguration{} -} - -// WithPrivileged sets the Privileged field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Privileged field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithPrivileged(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.Privileged = &value - return b -} - -// WithDefaultAddCapabilities adds the given value to the DefaultAddCapabilities field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the DefaultAddCapabilities field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithDefaultAddCapabilities(values ...v1.Capability) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.DefaultAddCapabilities = append(b.DefaultAddCapabilities, values[i]) - } - return b -} - -// WithRequiredDropCapabilities adds the given value to the RequiredDropCapabilities field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
-// If called multiple times, values provided by each call will be appended to the RequiredDropCapabilities field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithRequiredDropCapabilities(values ...v1.Capability) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.RequiredDropCapabilities = append(b.RequiredDropCapabilities, values[i]) - } - return b -} - -// WithAllowedCapabilities adds the given value to the AllowedCapabilities field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedCapabilities field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedCapabilities(values ...v1.Capability) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.AllowedCapabilities = append(b.AllowedCapabilities, values[i]) - } - return b -} - -// WithVolumes adds the given value to the Volumes field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Volumes field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithVolumes(values ...v1beta1.FSType) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.Volumes = append(b.Volumes, values[i]) - } - return b -} - -// WithHostNetwork sets the HostNetwork field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the HostNetwork field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithHostNetwork(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.HostNetwork = &value - return b -} - -// WithHostPorts adds the given value to the HostPorts field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the HostPorts field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithHostPorts(values ...*HostPortRangeApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithHostPorts") - } - b.HostPorts = append(b.HostPorts, *values[i]) - } - return b -} - -// WithHostPID sets the HostPID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the HostPID field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithHostPID(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.HostPID = &value - return b -} - -// WithHostIPC sets the HostIPC field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the HostIPC field is set to the value of the last call. 
-func (b *PodSecurityPolicySpecApplyConfiguration) WithHostIPC(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.HostIPC = &value - return b -} - -// WithSELinux sets the SELinux field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SELinux field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithSELinux(value *SELinuxStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.SELinux = value - return b -} - -// WithRunAsUser sets the RunAsUser field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the RunAsUser field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithRunAsUser(value *RunAsUserStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.RunAsUser = value - return b -} - -// WithRunAsGroup sets the RunAsGroup field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the RunAsGroup field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithRunAsGroup(value *RunAsGroupStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.RunAsGroup = value - return b -} - -// WithSupplementalGroups sets the SupplementalGroups field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SupplementalGroups field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithSupplementalGroups(value *SupplementalGroupsStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.SupplementalGroups = value - return b -} - -// WithFSGroup sets the FSGroup field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the FSGroup field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithFSGroup(value *FSGroupStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.FSGroup = value - return b -} - -// WithReadOnlyRootFilesystem sets the ReadOnlyRootFilesystem field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ReadOnlyRootFilesystem field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithReadOnlyRootFilesystem(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.ReadOnlyRootFilesystem = &value - return b -} - -// WithDefaultAllowPrivilegeEscalation sets the DefaultAllowPrivilegeEscalation field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DefaultAllowPrivilegeEscalation field is set to the value of the last call. 
-func (b *PodSecurityPolicySpecApplyConfiguration) WithDefaultAllowPrivilegeEscalation(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.DefaultAllowPrivilegeEscalation = &value - return b -} - -// WithAllowPrivilegeEscalation sets the AllowPrivilegeEscalation field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the AllowPrivilegeEscalation field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowPrivilegeEscalation(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.AllowPrivilegeEscalation = &value - return b -} - -// WithAllowedHostPaths adds the given value to the AllowedHostPaths field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedHostPaths field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedHostPaths(values ...*AllowedHostPathApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithAllowedHostPaths") - } - b.AllowedHostPaths = append(b.AllowedHostPaths, *values[i]) - } - return b -} - -// WithAllowedFlexVolumes adds the given value to the AllowedFlexVolumes field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedFlexVolumes field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedFlexVolumes(values ...*AllowedFlexVolumeApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithAllowedFlexVolumes") - } - b.AllowedFlexVolumes = append(b.AllowedFlexVolumes, *values[i]) - } - return b -} - -// WithAllowedCSIDrivers adds the given value to the AllowedCSIDrivers field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedCSIDrivers field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedCSIDrivers(values ...*AllowedCSIDriverApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithAllowedCSIDrivers") - } - b.AllowedCSIDrivers = append(b.AllowedCSIDrivers, *values[i]) - } - return b -} - -// WithAllowedUnsafeSysctls adds the given value to the AllowedUnsafeSysctls field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedUnsafeSysctls field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedUnsafeSysctls(values ...string) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.AllowedUnsafeSysctls = append(b.AllowedUnsafeSysctls, values[i]) - } - return b -} - -// WithForbiddenSysctls adds the given value to the ForbiddenSysctls field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
-// If called multiple times, values provided by each call will be appended to the ForbiddenSysctls field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithForbiddenSysctls(values ...string) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.ForbiddenSysctls = append(b.ForbiddenSysctls, values[i]) - } - return b -} - -// WithAllowedProcMountTypes adds the given value to the AllowedProcMountTypes field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedProcMountTypes field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedProcMountTypes(values ...v1.ProcMountType) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.AllowedProcMountTypes = append(b.AllowedProcMountTypes, values[i]) - } - return b -} - -// WithRuntimeClass sets the RuntimeClass field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the RuntimeClass field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithRuntimeClass(value *RuntimeClassStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.RuntimeClass = value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/runasgroupstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/runasgroupstrategyoptions.go deleted file mode 100644 index fcfcfbe6b9..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/runasgroupstrategyoptions.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/policy/v1beta1" -) - -// RunAsGroupStrategyOptionsApplyConfiguration represents an declarative configuration of the RunAsGroupStrategyOptions type for use -// with apply. -type RunAsGroupStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.RunAsGroupStrategy `json:"rule,omitempty"` - Ranges []IDRangeApplyConfiguration `json:"ranges,omitempty"` -} - -// RunAsGroupStrategyOptionsApplyConfiguration constructs an declarative configuration of the RunAsGroupStrategyOptions type for use with -// apply. -func RunAsGroupStrategyOptions() *RunAsGroupStrategyOptionsApplyConfiguration { - return &RunAsGroupStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. 
-func (b *RunAsGroupStrategyOptionsApplyConfiguration) WithRule(value v1beta1.RunAsGroupStrategy) *RunAsGroupStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithRanges adds the given value to the Ranges field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Ranges field. -func (b *RunAsGroupStrategyOptionsApplyConfiguration) WithRanges(values ...*IDRangeApplyConfiguration) *RunAsGroupStrategyOptionsApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithRanges") - } - b.Ranges = append(b.Ranges, *values[i]) - } - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/runasuserstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/runasuserstrategyoptions.go deleted file mode 100644 index a6d6ee58e3..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/runasuserstrategyoptions.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/policy/v1beta1" -) - -// RunAsUserStrategyOptionsApplyConfiguration represents an declarative configuration of the RunAsUserStrategyOptions type for use -// with apply. -type RunAsUserStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.RunAsUserStrategy `json:"rule,omitempty"` - Ranges []IDRangeApplyConfiguration `json:"ranges,omitempty"` -} - -// RunAsUserStrategyOptionsApplyConfiguration constructs an declarative configuration of the RunAsUserStrategyOptions type for use with -// apply. -func RunAsUserStrategyOptions() *RunAsUserStrategyOptionsApplyConfiguration { - return &RunAsUserStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. -func (b *RunAsUserStrategyOptionsApplyConfiguration) WithRule(value v1beta1.RunAsUserStrategy) *RunAsUserStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithRanges adds the given value to the Ranges field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Ranges field. 
-func (b *RunAsUserStrategyOptionsApplyConfiguration) WithRanges(values ...*IDRangeApplyConfiguration) *RunAsUserStrategyOptionsApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithRanges") - } - b.Ranges = append(b.Ranges, *values[i]) - } - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/runtimeclassstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/runtimeclassstrategyoptions.go deleted file mode 100644 index c19a7ce617..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/runtimeclassstrategyoptions.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// RuntimeClassStrategyOptionsApplyConfiguration represents an declarative configuration of the RuntimeClassStrategyOptions type for use -// with apply. -type RuntimeClassStrategyOptionsApplyConfiguration struct { - AllowedRuntimeClassNames []string `json:"allowedRuntimeClassNames,omitempty"` - DefaultRuntimeClassName *string `json:"defaultRuntimeClassName,omitempty"` -} - -// RuntimeClassStrategyOptionsApplyConfiguration constructs an declarative configuration of the RuntimeClassStrategyOptions type for use with -// apply. -func RuntimeClassStrategyOptions() *RuntimeClassStrategyOptionsApplyConfiguration { - return &RuntimeClassStrategyOptionsApplyConfiguration{} -} - -// WithAllowedRuntimeClassNames adds the given value to the AllowedRuntimeClassNames field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedRuntimeClassNames field. -func (b *RuntimeClassStrategyOptionsApplyConfiguration) WithAllowedRuntimeClassNames(values ...string) *RuntimeClassStrategyOptionsApplyConfiguration { - for i := range values { - b.AllowedRuntimeClassNames = append(b.AllowedRuntimeClassNames, values[i]) - } - return b -} - -// WithDefaultRuntimeClassName sets the DefaultRuntimeClassName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DefaultRuntimeClassName field is set to the value of the last call. 
-func (b *RuntimeClassStrategyOptionsApplyConfiguration) WithDefaultRuntimeClassName(value string) *RuntimeClassStrategyOptionsApplyConfiguration { - b.DefaultRuntimeClassName = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/selinuxstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/selinuxstrategyoptions.go deleted file mode 100644 index de7ede618e..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/selinuxstrategyoptions.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/policy/v1beta1" - v1 "k8s.io/client-go/applyconfigurations/core/v1" -) - -// SELinuxStrategyOptionsApplyConfiguration represents an declarative configuration of the SELinuxStrategyOptions type for use -// with apply. -type SELinuxStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.SELinuxStrategy `json:"rule,omitempty"` - SELinuxOptions *v1.SELinuxOptionsApplyConfiguration `json:"seLinuxOptions,omitempty"` -} - -// SELinuxStrategyOptionsApplyConfiguration constructs an declarative configuration of the SELinuxStrategyOptions type for use with -// apply. -func SELinuxStrategyOptions() *SELinuxStrategyOptionsApplyConfiguration { - return &SELinuxStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. -func (b *SELinuxStrategyOptionsApplyConfiguration) WithRule(value v1beta1.SELinuxStrategy) *SELinuxStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithSELinuxOptions sets the SELinuxOptions field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SELinuxOptions field is set to the value of the last call. -func (b *SELinuxStrategyOptionsApplyConfiguration) WithSELinuxOptions(value *v1.SELinuxOptionsApplyConfiguration) *SELinuxStrategyOptionsApplyConfiguration { - b.SELinuxOptions = value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/supplementalgroupsstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/supplementalgroupsstrategyoptions.go deleted file mode 100644 index 9e4a9bb2ca..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/supplementalgroupsstrategyoptions.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/policy/v1beta1" -) - -// SupplementalGroupsStrategyOptionsApplyConfiguration represents an declarative configuration of the SupplementalGroupsStrategyOptions type for use -// with apply. -type SupplementalGroupsStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.SupplementalGroupsStrategyType `json:"rule,omitempty"` - Ranges []IDRangeApplyConfiguration `json:"ranges,omitempty"` -} - -// SupplementalGroupsStrategyOptionsApplyConfiguration constructs an declarative configuration of the SupplementalGroupsStrategyOptions type for use with -// apply. -func SupplementalGroupsStrategyOptions() *SupplementalGroupsStrategyOptionsApplyConfiguration { - return &SupplementalGroupsStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. -func (b *SupplementalGroupsStrategyOptionsApplyConfiguration) WithRule(value v1beta1.SupplementalGroupsStrategyType) *SupplementalGroupsStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithRanges adds the given value to the Ranges field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Ranges field. -func (b *SupplementalGroupsStrategyOptionsApplyConfiguration) WithRanges(values ...*IDRangeApplyConfiguration) *SupplementalGroupsStrategyOptionsApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithRanges") - } - b.Ranges = append(b.Ranges, *values[i]) - } - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattributesclass.go similarity index 59% rename from vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/podsecuritypolicy.go rename to vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattributesclass.go index 46cfc4de1e..9d4c476259 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/podsecuritypolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattributesclass.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. 
-package v1beta1 +package v1alpha1 import ( - policyv1beta1 "k8s.io/api/policy/v1beta1" + v1alpha1 "k8s.io/api/storage/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -27,63 +27,64 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodSecurityPolicyApplyConfiguration represents an declarative configuration of the PodSecurityPolicy type for use +// VolumeAttributesClassApplyConfiguration represents an declarative configuration of the VolumeAttributesClass type for use // with apply. -type PodSecurityPolicyApplyConfiguration struct { +type VolumeAttributesClassApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *PodSecurityPolicySpecApplyConfiguration `json:"spec,omitempty"` + DriverName *string `json:"driverName,omitempty"` + Parameters map[string]string `json:"parameters,omitempty"` } -// PodSecurityPolicy constructs an declarative configuration of the PodSecurityPolicy type for use with +// VolumeAttributesClass constructs an declarative configuration of the VolumeAttributesClass type for use with // apply. -func PodSecurityPolicy(name string) *PodSecurityPolicyApplyConfiguration { - b := &PodSecurityPolicyApplyConfiguration{} +func VolumeAttributesClass(name string) *VolumeAttributesClassApplyConfiguration { + b := &VolumeAttributesClassApplyConfiguration{} b.WithName(name) - b.WithKind("PodSecurityPolicy") - b.WithAPIVersion("policy/v1beta1") + b.WithKind("VolumeAttributesClass") + b.WithAPIVersion("storage.k8s.io/v1alpha1") return b } -// ExtractPodSecurityPolicy extracts the applied configuration owned by fieldManager from -// podSecurityPolicy. If no managedFields are found in podSecurityPolicy for fieldManager, a -// PodSecurityPolicyApplyConfiguration is returned with only the Name, Namespace (if applicable), +// ExtractVolumeAttributesClass extracts the applied configuration owned by fieldManager from +// volumeAttributesClass. If no managedFields are found in volumeAttributesClass for fieldManager, a +// VolumeAttributesClassApplyConfiguration is returned with only the Name, Namespace (if applicable), // APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. -// podSecurityPolicy must be a unmodified PodSecurityPolicy API object that was retrieved from the Kubernetes API. -// ExtractPodSecurityPolicy provides a way to perform a extract/modify-in-place/apply workflow. +// volumeAttributesClass must be a unmodified VolumeAttributesClass API object that was retrieved from the Kubernetes API. +// ExtractVolumeAttributesClass provides a way to perform a extract/modify-in-place/apply workflow. // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! 
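Reviewer note on the rename above: the removed PodSecurityPolicy apply configuration is replaced by a VolumeAttributesClass builder whose only spec-level fields are DriverName and Parameters. Below is a minimal, hypothetical sketch of how a consumer might construct such a configuration with the generated With* setters (the setters themselves appear further down in this file's hunks); the driver name and parameter values are placeholders, and in practice the result would typically be handed to the storage v1alpha1 typed client's Apply call.

package main

import (
	"fmt"

	storagev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1"
)

func main() {
	// Build the declarative configuration with the generated chained setters.
	// The driver name and parameters below are hypothetical placeholders.
	vac := storagev1alpha1.VolumeAttributesClass("example-class").
		WithDriverName("example.csi.vendor.io").
		WithParameters(map[string]string{"iops": "3000"})

	// Inspect what the builder produced; a real program would pass vac to Apply.
	fmt.Println(*vac.DriverName, vac.Parameters)
}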
-func ExtractPodSecurityPolicy(podSecurityPolicy *policyv1beta1.PodSecurityPolicy, fieldManager string) (*PodSecurityPolicyApplyConfiguration, error) { - return extractPodSecurityPolicy(podSecurityPolicy, fieldManager, "") +func ExtractVolumeAttributesClass(volumeAttributesClass *v1alpha1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) { + return extractVolumeAttributesClass(volumeAttributesClass, fieldManager, "") } -// ExtractPodSecurityPolicyStatus is the same as ExtractPodSecurityPolicy except +// ExtractVolumeAttributesClassStatus is the same as ExtractVolumeAttributesClass except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractPodSecurityPolicyStatus(podSecurityPolicy *policyv1beta1.PodSecurityPolicy, fieldManager string) (*PodSecurityPolicyApplyConfiguration, error) { - return extractPodSecurityPolicy(podSecurityPolicy, fieldManager, "status") +func ExtractVolumeAttributesClassStatus(volumeAttributesClass *v1alpha1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) { + return extractVolumeAttributesClass(volumeAttributesClass, fieldManager, "status") } -func extractPodSecurityPolicy(podSecurityPolicy *policyv1beta1.PodSecurityPolicy, fieldManager string, subresource string) (*PodSecurityPolicyApplyConfiguration, error) { - b := &PodSecurityPolicyApplyConfiguration{} - err := managedfields.ExtractInto(podSecurityPolicy, internal.Parser().Type("io.k8s.api.policy.v1beta1.PodSecurityPolicy"), fieldManager, b, subresource) +func extractVolumeAttributesClass(volumeAttributesClass *v1alpha1.VolumeAttributesClass, fieldManager string, subresource string) (*VolumeAttributesClassApplyConfiguration, error) { + b := &VolumeAttributesClassApplyConfiguration{} + err := managedfields.ExtractInto(volumeAttributesClass, internal.Parser().Type("io.k8s.api.storage.v1alpha1.VolumeAttributesClass"), fieldManager, b, subresource) if err != nil { return nil, err } - b.WithName(podSecurityPolicy.Name) + b.WithName(volumeAttributesClass.Name) - b.WithKind("PodSecurityPolicy") - b.WithAPIVersion("policy/v1beta1") + b.WithKind("VolumeAttributesClass") + b.WithAPIVersion("storage.k8s.io/v1alpha1") return b, nil } // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithKind(value string) *PodSecurityPolicyApplyConfiguration { +func (b *VolumeAttributesClassApplyConfiguration) WithKind(value string) *VolumeAttributesClassApplyConfiguration { b.Kind = &value return b } @@ -91,7 +92,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithKind(value string) *PodSecurit // WithAPIVersion sets the APIVersion field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. 
-func (b *PodSecurityPolicyApplyConfiguration) WithAPIVersion(value string) *PodSecurityPolicyApplyConfiguration { +func (b *VolumeAttributesClassApplyConfiguration) WithAPIVersion(value string) *VolumeAttributesClassApplyConfiguration { b.APIVersion = &value return b } @@ -99,7 +100,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithAPIVersion(value string) *PodS // WithName sets the Name field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithName(value string) *PodSecurityPolicyApplyConfiguration { +func (b *VolumeAttributesClassApplyConfiguration) WithName(value string) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Name = &value return b @@ -108,7 +109,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithName(value string) *PodSecurit // WithGenerateName sets the GenerateName field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the GenerateName field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithGenerateName(value string) *PodSecurityPolicyApplyConfiguration { +func (b *VolumeAttributesClassApplyConfiguration) WithGenerateName(value string) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.GenerateName = &value return b @@ -117,7 +118,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithGenerateName(value string) *Po // WithNamespace sets the Namespace field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Namespace field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithNamespace(value string) *PodSecurityPolicyApplyConfiguration { +func (b *VolumeAttributesClassApplyConfiguration) WithNamespace(value string) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Namespace = &value return b @@ -126,7 +127,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithNamespace(value string) *PodSe // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithUID(value types.UID) *PodSecurityPolicyApplyConfiguration { +func (b *VolumeAttributesClassApplyConfiguration) WithUID(value types.UID) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.UID = &value return b @@ -135,7 +136,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithUID(value types.UID) *PodSecur // WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ResourceVersion field is set to the value of the last call. 
-func (b *PodSecurityPolicyApplyConfiguration) WithResourceVersion(value string) *PodSecurityPolicyApplyConfiguration { +func (b *VolumeAttributesClassApplyConfiguration) WithResourceVersion(value string) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.ResourceVersion = &value return b @@ -144,7 +145,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithResourceVersion(value string) // WithGeneration sets the Generation field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Generation field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithGeneration(value int64) *PodSecurityPolicyApplyConfiguration { +func (b *VolumeAttributesClassApplyConfiguration) WithGeneration(value int64) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Generation = &value return b @@ -153,7 +154,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithGeneration(value int64) *PodSe // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PodSecurityPolicyApplyConfiguration { +func (b *VolumeAttributesClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.CreationTimestamp = &value return b @@ -162,7 +163,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithCreationTimestamp(value metav1 // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PodSecurityPolicyApplyConfiguration { +func (b *VolumeAttributesClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.DeletionTimestamp = &value return b @@ -171,7 +172,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithDeletionTimestamp(value metav1 // WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PodSecurityPolicyApplyConfiguration { +func (b *VolumeAttributesClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.DeletionGracePeriodSeconds = &value return b @@ -181,7 +182,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(val // and returns the receiver, so that objects can be build by chaining "With" function invocations. 
// If called multiple times, the entries provided by each call will be put on the Labels field, // overwriting an existing map entries in Labels field with the same key. -func (b *PodSecurityPolicyApplyConfiguration) WithLabels(entries map[string]string) *PodSecurityPolicyApplyConfiguration { +func (b *VolumeAttributesClassApplyConfiguration) WithLabels(entries map[string]string) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() if b.Labels == nil && len(entries) > 0 { b.Labels = make(map[string]string, len(entries)) @@ -196,7 +197,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithLabels(entries map[string]stri // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, the entries provided by each call will be put on the Annotations field, // overwriting an existing map entries in Annotations field with the same key. -func (b *PodSecurityPolicyApplyConfiguration) WithAnnotations(entries map[string]string) *PodSecurityPolicyApplyConfiguration { +func (b *VolumeAttributesClassApplyConfiguration) WithAnnotations(entries map[string]string) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() if b.Annotations == nil && len(entries) > 0 { b.Annotations = make(map[string]string, len(entries)) @@ -210,7 +211,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithAnnotations(entries map[string // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *PodSecurityPolicyApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PodSecurityPolicyApplyConfiguration { +func (b *VolumeAttributesClassApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { @@ -224,7 +225,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithOwnerReferences(values ...*v1. // WithFinalizers adds the given value to the Finalizers field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Finalizers field. 
-func (b *PodSecurityPolicyApplyConfiguration) WithFinalizers(values ...string) *PodSecurityPolicyApplyConfiguration { +func (b *VolumeAttributesClassApplyConfiguration) WithFinalizers(values ...string) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { b.Finalizers = append(b.Finalizers, values[i]) @@ -232,16 +233,30 @@ func (b *PodSecurityPolicyApplyConfiguration) WithFinalizers(values ...string) * return b } -func (b *PodSecurityPolicyApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { +func (b *VolumeAttributesClassApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} } } -// WithSpec sets the Spec field in the declarative configuration to the given value +// WithDriverName sets the DriverName field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Spec field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithSpec(value *PodSecurityPolicySpecApplyConfiguration) *PodSecurityPolicyApplyConfiguration { - b.Spec = value +// If called multiple times, the DriverName field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithDriverName(value string) *VolumeAttributesClassApplyConfiguration { + b.DriverName = &value + return b +} + +// WithParameters puts the entries into the Parameters field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Parameters field, +// overwriting an existing map entries in Parameters field with the same key. +func (b *VolumeAttributesClassApplyConfiguration) WithParameters(entries map[string]string) *VolumeAttributesClassApplyConfiguration { + if b.Parameters == nil && len(entries) > 0 { + b.Parameters = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Parameters[k] = v + } return b } diff --git a/vendor/k8s.io/client-go/discovery/discovery_client.go b/vendor/k8s.io/client-go/discovery/discovery_client.go index 102ce49bf5..df0e0f9974 100644 --- a/vendor/k8s.io/client-go/discovery/discovery_client.go +++ b/vendor/k8s.io/client-go/discovery/discovery_client.go @@ -19,6 +19,7 @@ package discovery import ( "context" "encoding/json" + goerrors "errors" "fmt" "mime" "net/http" @@ -419,6 +420,12 @@ func (e *ErrGroupDiscoveryFailed) Error() string { return fmt.Sprintf("unable to retrieve the complete list of server APIs: %s", strings.Join(groups, ", ")) } +// Is makes it possible for the callers to use `errors.Is(` helper on errors wrapped with ErrGroupDiscoveryFailed error. +func (e *ErrGroupDiscoveryFailed) Is(target error) bool { + _, ok := target.(*ErrGroupDiscoveryFailed) + return ok +} + // IsGroupDiscoveryFailedError returns true if the provided error indicates the server was unable to discover // a complete list of APIs for the client to use. func IsGroupDiscoveryFailedError(err error) bool { @@ -426,6 +433,16 @@ func IsGroupDiscoveryFailedError(err error) bool { return err != nil && ok } +// GroupDiscoveryFailedErrorGroups returns true if the error is an ErrGroupDiscoveryFailed error, +// along with the map of group versions that failed discovery. 
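Reviewer note on the discovery changes above: ErrGroupDiscoveryFailed now matches with errors.Is, and the new GroupDiscoveryFailedErrorGroups accessor (its body follows in the next hunk) exposes the per-group-version errors even when the error is wrapped. A minimal sketch of how downstream code might inspect such an error; the reportDiscoveryFailures wrapper is purely illustrative.

package main

import (
	"errors"
	"fmt"

	"k8s.io/client-go/discovery"
)

// reportDiscoveryFailures demonstrates the two new inspection paths:
// errors.Is matching against *ErrGroupDiscoveryFailed, and extracting the
// failed group versions via GroupDiscoveryFailedErrorGroups.
func reportDiscoveryFailures(err error) {
	if errors.Is(err, &discovery.ErrGroupDiscoveryFailed{}) {
		fmt.Println("some API groups failed discovery")
	}
	if groups, ok := discovery.GroupDiscoveryFailedErrorGroups(err); ok {
		for gv, gvErr := range groups {
			fmt.Printf("  %s: %v\n", gv, gvErr)
		}
	}
}

func main() {
	// No-op here; a real error would come from e.g. ServerGroupsAndResources.
	reportDiscoveryFailures(nil)
}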
+func GroupDiscoveryFailedErrorGroups(err error) (map[schema.GroupVersion]error, bool) { + var groupDiscoveryError *ErrGroupDiscoveryFailed + if err != nil && goerrors.As(err, &groupDiscoveryError) { + return groupDiscoveryError.Groups, true + } + return nil, false +} + func ServerGroupsAndResources(d DiscoveryInterface) ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { var sgs *metav1.APIGroupList var resources []*metav1.APIResourceList @@ -637,16 +654,7 @@ func (d *DiscoveryClient) ServerVersion() (*version.Info, error) { func (d *DiscoveryClient) OpenAPISchema() (*openapi_v2.Document, error) { data, err := d.restClient.Get().AbsPath("/openapi/v2").SetHeader("Accept", openAPIV2mimePb).Do(context.TODO()).Raw() if err != nil { - if errors.IsForbidden(err) || errors.IsNotFound(err) || errors.IsNotAcceptable(err) { - // single endpoint not found/registered in old server, try to fetch old endpoint - // TODO: remove this when kubectl/client-go don't work with 1.9 server - data, err = d.restClient.Get().AbsPath("/swagger-2.0.0.pb-v1").Do(context.TODO()).Raw() - if err != nil { - return nil, err - } - } else { - return nil, err - } + return nil, err } document := &openapi_v2.Document{} err = proto.Unmarshal(data, document) diff --git a/vendor/k8s.io/client-go/kubernetes/clientset.go b/vendor/k8s.io/client-go/kubernetes/clientset.go index 6345f2fb62..a0095d086f 100644 --- a/vendor/k8s.io/client-go/kubernetes/clientset.go +++ b/vendor/k8s.io/client-go/kubernetes/clientset.go @@ -52,7 +52,7 @@ import ( eventsv1 "k8s.io/client-go/kubernetes/typed/events/v1" eventsv1beta1 "k8s.io/client-go/kubernetes/typed/events/v1beta1" extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" - flowcontrolv1alpha1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1" + flowcontrolv1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1" flowcontrolv1beta1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1" flowcontrolv1beta2 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2" flowcontrolv1beta3 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3" @@ -109,7 +109,7 @@ type Interface interface { EventsV1() eventsv1.EventsV1Interface EventsV1beta1() eventsv1beta1.EventsV1beta1Interface ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface - FlowcontrolV1alpha1() flowcontrolv1alpha1.FlowcontrolV1alpha1Interface + FlowcontrolV1() flowcontrolv1.FlowcontrolV1Interface FlowcontrolV1beta1() flowcontrolv1beta1.FlowcontrolV1beta1Interface FlowcontrolV1beta2() flowcontrolv1beta2.FlowcontrolV1beta2Interface FlowcontrolV1beta3() flowcontrolv1beta3.FlowcontrolV1beta3Interface @@ -165,7 +165,7 @@ type Clientset struct { eventsV1 *eventsv1.EventsV1Client eventsV1beta1 *eventsv1beta1.EventsV1beta1Client extensionsV1beta1 *extensionsv1beta1.ExtensionsV1beta1Client - flowcontrolV1alpha1 *flowcontrolv1alpha1.FlowcontrolV1alpha1Client + flowcontrolV1 *flowcontrolv1.FlowcontrolV1Client flowcontrolV1beta1 *flowcontrolv1beta1.FlowcontrolV1beta1Client flowcontrolV1beta2 *flowcontrolv1beta2.FlowcontrolV1beta2Client flowcontrolV1beta3 *flowcontrolv1beta3.FlowcontrolV1beta3Client @@ -334,9 +334,9 @@ func (c *Clientset) ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Inter return c.extensionsV1beta1 } -// FlowcontrolV1alpha1 retrieves the FlowcontrolV1alpha1Client -func (c *Clientset) FlowcontrolV1alpha1() flowcontrolv1alpha1.FlowcontrolV1alpha1Interface { - return c.flowcontrolV1alpha1 +// FlowcontrolV1 retrieves the FlowcontrolV1Client +func (c *Clientset) FlowcontrolV1() 
flowcontrolv1.FlowcontrolV1Interface { + return c.flowcontrolV1 } // FlowcontrolV1beta1 retrieves the FlowcontrolV1beta1Client @@ -604,7 +604,7 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } - cs.flowcontrolV1alpha1, err = flowcontrolv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) + cs.flowcontrolV1, err = flowcontrolv1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } @@ -742,7 +742,7 @@ func New(c rest.Interface) *Clientset { cs.eventsV1 = eventsv1.New(c) cs.eventsV1beta1 = eventsv1beta1.New(c) cs.extensionsV1beta1 = extensionsv1beta1.New(c) - cs.flowcontrolV1alpha1 = flowcontrolv1alpha1.New(c) + cs.flowcontrolV1 = flowcontrolv1.New(c) cs.flowcontrolV1beta1 = flowcontrolv1beta1.New(c) cs.flowcontrolV1beta2 = flowcontrolv1beta2.New(c) cs.flowcontrolV1beta3 = flowcontrolv1beta3.New(c) diff --git a/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go b/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go index 25de81caa8..fc529873f1 100644 --- a/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go +++ b/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go @@ -82,8 +82,8 @@ import ( fakeeventsv1beta1 "k8s.io/client-go/kubernetes/typed/events/v1beta1/fake" extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" fakeextensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake" - flowcontrolv1alpha1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1" - fakeflowcontrolv1alpha1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake" + flowcontrolv1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1" + fakeflowcontrolv1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake" flowcontrolv1beta1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1" fakeflowcontrolv1beta1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake" flowcontrolv1beta2 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2" @@ -324,9 +324,9 @@ func (c *Clientset) ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Inter return &fakeextensionsv1beta1.FakeExtensionsV1beta1{Fake: &c.Fake} } -// FlowcontrolV1alpha1 retrieves the FlowcontrolV1alpha1Client -func (c *Clientset) FlowcontrolV1alpha1() flowcontrolv1alpha1.FlowcontrolV1alpha1Interface { - return &fakeflowcontrolv1alpha1.FakeFlowcontrolV1alpha1{Fake: &c.Fake} +// FlowcontrolV1 retrieves the FlowcontrolV1Client +func (c *Clientset) FlowcontrolV1() flowcontrolv1.FlowcontrolV1Interface { + return &fakeflowcontrolv1.FakeFlowcontrolV1{Fake: &c.Fake} } // FlowcontrolV1beta1 retrieves the FlowcontrolV1beta1Client diff --git a/vendor/k8s.io/client-go/kubernetes/fake/register.go b/vendor/k8s.io/client-go/kubernetes/fake/register.go index 0091841858..6b80d68339 100644 --- a/vendor/k8s.io/client-go/kubernetes/fake/register.go +++ b/vendor/k8s.io/client-go/kubernetes/fake/register.go @@ -48,7 +48,7 @@ import ( eventsv1 "k8s.io/api/events/v1" eventsv1beta1 "k8s.io/api/events/v1beta1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" - flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + flowcontrolv1 "k8s.io/api/flowcontrol/v1" flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" @@ -110,7 +110,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ eventsv1.AddToScheme, eventsv1beta1.AddToScheme, extensionsv1beta1.AddToScheme, - flowcontrolv1alpha1.AddToScheme, + 
flowcontrolv1.AddToScheme, flowcontrolv1beta1.AddToScheme, flowcontrolv1beta2.AddToScheme, flowcontrolv1beta3.AddToScheme, diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/register.go b/vendor/k8s.io/client-go/kubernetes/scheme/register.go index 64d3ce2a7b..f44055fbfc 100644 --- a/vendor/k8s.io/client-go/kubernetes/scheme/register.go +++ b/vendor/k8s.io/client-go/kubernetes/scheme/register.go @@ -48,7 +48,7 @@ import ( eventsv1 "k8s.io/api/events/v1" eventsv1beta1 "k8s.io/api/events/v1beta1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" - flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + flowcontrolv1 "k8s.io/api/flowcontrol/v1" flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" @@ -110,7 +110,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ eventsv1.AddToScheme, eventsv1beta1.AddToScheme, extensionsv1beta1.AddToScheme, - flowcontrolv1alpha1.AddToScheme, + flowcontrolv1.AddToScheme, flowcontrolv1beta1.AddToScheme, flowcontrolv1beta2.AddToScheme, flowcontrolv1beta3.AddToScheme, diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/doc.go similarity index 97% rename from vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/doc.go rename to vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/doc.go index df51baa4d4..3af5d054f1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/doc.go @@ -17,4 +17,4 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. // This package has the automatically generated typed clients. -package v1alpha1 +package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/doc.go similarity index 100% rename from vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/doc.go rename to vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/doc.go diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_flowcontrol_client.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_flowcontrol_client.go similarity index 72% rename from vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_flowcontrol_client.go rename to vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_flowcontrol_client.go index 72d5eff0c2..d15f4b2426 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_flowcontrol_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_flowcontrol_client.go @@ -19,26 +19,26 @@ limitations under the License. 
package fake import ( - v1alpha1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1" + v1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1" rest "k8s.io/client-go/rest" testing "k8s.io/client-go/testing" ) -type FakeFlowcontrolV1alpha1 struct { +type FakeFlowcontrolV1 struct { *testing.Fake } -func (c *FakeFlowcontrolV1alpha1) FlowSchemas() v1alpha1.FlowSchemaInterface { +func (c *FakeFlowcontrolV1) FlowSchemas() v1.FlowSchemaInterface { return &FakeFlowSchemas{c} } -func (c *FakeFlowcontrolV1alpha1) PriorityLevelConfigurations() v1alpha1.PriorityLevelConfigurationInterface { +func (c *FakeFlowcontrolV1) PriorityLevelConfigurations() v1.PriorityLevelConfigurationInterface { return &FakePriorityLevelConfigurations{c} } // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. -func (c *FakeFlowcontrolV1alpha1) RESTClient() rest.Interface { +func (c *FakeFlowcontrolV1) RESTClient() rest.Interface { var ret *rest.RESTClient return ret } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_flowschema.go similarity index 67% rename from vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_flowschema.go rename to vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_flowschema.go index f367638892..922a60d89b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_flowschema.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_flowschema.go @@ -23,38 +23,38 @@ import ( json "encoding/json" "fmt" - v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/flowcontrol/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - flowcontrolv1alpha1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1" + flowcontrolv1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1" testing "k8s.io/client-go/testing" ) // FakeFlowSchemas implements FlowSchemaInterface type FakeFlowSchemas struct { - Fake *FakeFlowcontrolV1alpha1 + Fake *FakeFlowcontrolV1 } -var flowschemasResource = v1alpha1.SchemeGroupVersion.WithResource("flowschemas") +var flowschemasResource = v1.SchemeGroupVersion.WithResource("flowschemas") -var flowschemasKind = v1alpha1.SchemeGroupVersion.WithKind("FlowSchema") +var flowschemasKind = v1.SchemeGroupVersion.WithKind("FlowSchema") // Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. -func (c *FakeFlowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.FlowSchema, err error) { +func (c *FakeFlowSchemas) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.FlowSchema, err error) { obj, err := c.Fake. - Invokes(testing.NewRootGetAction(flowschemasResource, name), &v1alpha1.FlowSchema{}) + Invokes(testing.NewRootGetAction(flowschemasResource, name), &v1.FlowSchema{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.FlowSchema), err + return obj.(*v1.FlowSchema), err } // List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. 
-func (c *FakeFlowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.FlowSchemaList, err error) { +func (c *FakeFlowSchemas) List(ctx context.Context, opts metav1.ListOptions) (result *v1.FlowSchemaList, err error) { obj, err := c.Fake. - Invokes(testing.NewRootListAction(flowschemasResource, flowschemasKind, opts), &v1alpha1.FlowSchemaList{}) + Invokes(testing.NewRootListAction(flowschemasResource, flowschemasKind, opts), &v1.FlowSchemaList{}) if obj == nil { return nil, err } @@ -63,8 +63,8 @@ func (c *FakeFlowSchemas) List(ctx context.Context, opts v1.ListOptions) (result if label == nil { label = labels.Everything() } - list := &v1alpha1.FlowSchemaList{ListMeta: obj.(*v1alpha1.FlowSchemaList).ListMeta} - for _, item := range obj.(*v1alpha1.FlowSchemaList).Items { + list := &v1.FlowSchemaList{ListMeta: obj.(*v1.FlowSchemaList).ListMeta} + for _, item := range obj.(*v1.FlowSchemaList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -73,69 +73,69 @@ func (c *FakeFlowSchemas) List(ctx context.Context, opts v1.ListOptions) (result } // Watch returns a watch.Interface that watches the requested flowSchemas. -func (c *FakeFlowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakeFlowSchemas) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(flowschemasResource, opts)) } // Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *FakeFlowSchemas) Create(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.CreateOptions) (result *v1alpha1.FlowSchema, err error) { +func (c *FakeFlowSchemas) Create(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.CreateOptions) (result *v1.FlowSchema, err error) { obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(flowschemasResource, flowSchema), &v1alpha1.FlowSchema{}) + Invokes(testing.NewRootCreateAction(flowschemasResource, flowSchema), &v1.FlowSchema{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.FlowSchema), err + return obj.(*v1.FlowSchema), err } // Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *FakeFlowSchemas) Update(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.UpdateOptions) (result *v1alpha1.FlowSchema, err error) { +func (c *FakeFlowSchemas) Update(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (result *v1.FlowSchema, err error) { obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(flowschemasResource, flowSchema), &v1alpha1.FlowSchema{}) + Invokes(testing.NewRootUpdateAction(flowschemasResource, flowSchema), &v1.FlowSchema{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.FlowSchema), err + return obj.(*v1.FlowSchema), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeFlowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.UpdateOptions) (*v1alpha1.FlowSchema, error) { +func (c *FakeFlowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (*v1.FlowSchema, error) { obj, err := c.Fake. 
- Invokes(testing.NewRootUpdateSubresourceAction(flowschemasResource, "status", flowSchema), &v1alpha1.FlowSchema{}) + Invokes(testing.NewRootUpdateSubresourceAction(flowschemasResource, "status", flowSchema), &v1.FlowSchema{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.FlowSchema), err + return obj.(*v1.FlowSchema), err } // Delete takes name of the flowSchema and deletes it. Returns an error if one occurs. -func (c *FakeFlowSchemas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakeFlowSchemas) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(flowschemasResource, name, opts), &v1alpha1.FlowSchema{}) + Invokes(testing.NewRootDeleteActionWithOptions(flowschemasResource, name, opts), &v1.FlowSchema{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakeFlowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakeFlowSchemas) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(flowschemasResource, listOpts) - _, err := c.Fake.Invokes(action, &v1alpha1.FlowSchemaList{}) + _, err := c.Fake.Invokes(action, &v1.FlowSchemaList{}) return err } // Patch applies the patch and returns the patched flowSchema. -func (c *FakeFlowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.FlowSchema, err error) { +func (c *FakeFlowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.FlowSchema, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, name, pt, data, subresources...), &v1alpha1.FlowSchema{}) + Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, name, pt, data, subresources...), &v1.FlowSchema{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.FlowSchema), err + return obj.(*v1.FlowSchema), err } // Apply takes the given apply declarative configuration, applies it and returns the applied flowSchema. -func (c *FakeFlowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1alpha1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.FlowSchema, err error) { +func (c *FakeFlowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FlowSchema, err error) { if flowSchema == nil { return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") } @@ -148,16 +148,16 @@ func (c *FakeFlowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1al return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, *name, types.ApplyPatchType, data), &v1alpha1.FlowSchema{}) + Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, *name, types.ApplyPatchType, data), &v1.FlowSchema{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.FlowSchema), err + return obj.(*v1.FlowSchema), err } // ApplyStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakeFlowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1alpha1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.FlowSchema, err error) { +func (c *FakeFlowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FlowSchema, err error) { if flowSchema == nil { return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") } @@ -170,9 +170,9 @@ func (c *FakeFlowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontr return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, *name, types.ApplyPatchType, data, "status"), &v1alpha1.FlowSchema{}) + Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, *name, types.ApplyPatchType, data, "status"), &v1.FlowSchema{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.FlowSchema), err + return obj.(*v1.FlowSchema), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_prioritylevelconfiguration.go similarity index 64% rename from vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_prioritylevelconfiguration.go rename to vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_prioritylevelconfiguration.go index 6512c36f69..27d9586748 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake/fake_prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_prioritylevelconfiguration.go @@ -23,38 +23,38 @@ import ( json "encoding/json" "fmt" - v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/flowcontrol/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - flowcontrolv1alpha1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1" + flowcontrolv1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1" testing "k8s.io/client-go/testing" ) // FakePriorityLevelConfigurations implements PriorityLevelConfigurationInterface type FakePriorityLevelConfigurations struct { - Fake *FakeFlowcontrolV1alpha1 + Fake *FakeFlowcontrolV1 } -var prioritylevelconfigurationsResource = v1alpha1.SchemeGroupVersion.WithResource("prioritylevelconfigurations") +var prioritylevelconfigurationsResource = v1.SchemeGroupVersion.WithResource("prioritylevelconfigurations") -var prioritylevelconfigurationsKind = v1alpha1.SchemeGroupVersion.WithKind("PriorityLevelConfiguration") +var prioritylevelconfigurationsKind = v1.SchemeGroupVersion.WithKind("PriorityLevelConfiguration") // Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. -func (c *FakePriorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { +func (c *FakePriorityLevelConfigurations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PriorityLevelConfiguration, err error) { obj, err := c.Fake. 
- Invokes(testing.NewRootGetAction(prioritylevelconfigurationsResource, name), &v1alpha1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootGetAction(prioritylevelconfigurationsResource, name), &v1.PriorityLevelConfiguration{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.PriorityLevelConfiguration), err + return obj.(*v1.PriorityLevelConfiguration), err } // List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. -func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PriorityLevelConfigurationList, err error) { +func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PriorityLevelConfigurationList, err error) { obj, err := c.Fake. - Invokes(testing.NewRootListAction(prioritylevelconfigurationsResource, prioritylevelconfigurationsKind, opts), &v1alpha1.PriorityLevelConfigurationList{}) + Invokes(testing.NewRootListAction(prioritylevelconfigurationsResource, prioritylevelconfigurationsKind, opts), &v1.PriorityLevelConfigurationList{}) if obj == nil { return nil, err } @@ -63,8 +63,8 @@ func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts v1.List if label == nil { label = labels.Everything() } - list := &v1alpha1.PriorityLevelConfigurationList{ListMeta: obj.(*v1alpha1.PriorityLevelConfigurationList).ListMeta} - for _, item := range obj.(*v1alpha1.PriorityLevelConfigurationList).Items { + list := &v1.PriorityLevelConfigurationList{ListMeta: obj.(*v1.PriorityLevelConfigurationList).ListMeta} + for _, item := range obj.(*v1.PriorityLevelConfigurationList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -73,69 +73,69 @@ func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts v1.List } // Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. -func (c *FakePriorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *FakePriorityLevelConfigurations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewRootWatchAction(prioritylevelconfigurationsResource, opts)) } // Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *FakePriorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { +func (c *FakePriorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.CreateOptions) (result *v1.PriorityLevelConfiguration, err error) { obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1alpha1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootCreateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1.PriorityLevelConfiguration{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.PriorityLevelConfiguration), err + return obj.(*v1.PriorityLevelConfiguration), err } // Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. 
-func (c *FakePriorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { +func (c *FakePriorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (result *v1.PriorityLevelConfiguration, err error) { obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1alpha1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootUpdateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1.PriorityLevelConfiguration{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.PriorityLevelConfiguration), err + return obj.(*v1.PriorityLevelConfiguration), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1alpha1.PriorityLevelConfiguration, error) { +func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (*v1.PriorityLevelConfiguration, error) { obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(prioritylevelconfigurationsResource, "status", priorityLevelConfiguration), &v1alpha1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootUpdateSubresourceAction(prioritylevelconfigurationsResource, "status", priorityLevelConfiguration), &v1.PriorityLevelConfiguration{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.PriorityLevelConfiguration), err + return obj.(*v1.PriorityLevelConfiguration), err } // Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs. -func (c *FakePriorityLevelConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *FakePriorityLevelConfigurations) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(prioritylevelconfigurationsResource, name, opts), &v1alpha1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootDeleteActionWithOptions(prioritylevelconfigurationsResource, name, opts), &v1.PriorityLevelConfiguration{}) return err } // DeleteCollection deletes a collection of objects. -func (c *FakePriorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *FakePriorityLevelConfigurations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { action := testing.NewRootDeleteCollectionAction(prioritylevelconfigurationsResource, listOpts) - _, err := c.Fake.Invokes(action, &v1alpha1.PriorityLevelConfigurationList{}) + _, err := c.Fake.Invokes(action, &v1.PriorityLevelConfigurationList{}) return err } // Patch applies the patch and returns the patched priorityLevelConfiguration. 
-func (c *FakePriorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PriorityLevelConfiguration, err error) { +func (c *FakePriorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PriorityLevelConfiguration, err error) { obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, name, pt, data, subresources...), &v1alpha1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, name, pt, data, subresources...), &v1.PriorityLevelConfiguration{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.PriorityLevelConfiguration), err + return obj.(*v1.PriorityLevelConfiguration), err } // Apply takes the given apply declarative configuration, applies it and returns the applied priorityLevelConfiguration. -func (c *FakePriorityLevelConfigurations) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1alpha1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { +func (c *FakePriorityLevelConfigurations) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityLevelConfiguration, err error) { if priorityLevelConfiguration == nil { return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") } @@ -148,16 +148,16 @@ func (c *FakePriorityLevelConfigurations) Apply(ctx context.Context, priorityLev return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") } obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data), &v1alpha1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data), &v1.PriorityLevelConfiguration{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.PriorityLevelConfiguration), err + return obj.(*v1.PriorityLevelConfiguration), err } // ApplyStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakePriorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1alpha1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { +func (c *FakePriorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityLevelConfiguration, err error) { if priorityLevelConfiguration == nil { return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") } @@ -170,9 +170,9 @@ func (c *FakePriorityLevelConfigurations) ApplyStatus(ctx context.Context, prior return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") } obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, "status"), &v1alpha1.PriorityLevelConfiguration{}) + Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, "status"), &v1.PriorityLevelConfiguration{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.PriorityLevelConfiguration), err + return obj.(*v1.PriorityLevelConfiguration), err } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowcontrol_client.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowcontrol_client.go similarity index 64% rename from vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowcontrol_client.go rename to vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowcontrol_client.go index c6f2d94056..3d7d93ef14 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowcontrol_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowcontrol_client.go @@ -16,39 +16,39 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -package v1alpha1 +package v1 import ( "net/http" - v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + v1 "k8s.io/api/flowcontrol/v1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) -type FlowcontrolV1alpha1Interface interface { +type FlowcontrolV1Interface interface { RESTClient() rest.Interface FlowSchemasGetter PriorityLevelConfigurationsGetter } -// FlowcontrolV1alpha1Client is used to interact with features provided by the flowcontrol.apiserver.k8s.io group. -type FlowcontrolV1alpha1Client struct { +// FlowcontrolV1Client is used to interact with features provided by the flowcontrol.apiserver.k8s.io group. +type FlowcontrolV1Client struct { restClient rest.Interface } -func (c *FlowcontrolV1alpha1Client) FlowSchemas() FlowSchemaInterface { +func (c *FlowcontrolV1Client) FlowSchemas() FlowSchemaInterface { return newFlowSchemas(c) } -func (c *FlowcontrolV1alpha1Client) PriorityLevelConfigurations() PriorityLevelConfigurationInterface { +func (c *FlowcontrolV1Client) PriorityLevelConfigurations() PriorityLevelConfigurationInterface { return newPriorityLevelConfigurations(c) } -// NewForConfig creates a new FlowcontrolV1alpha1Client for the given config. +// NewForConfig creates a new FlowcontrolV1Client for the given config. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). -func NewForConfig(c *rest.Config) (*FlowcontrolV1alpha1Client, error) { +func NewForConfig(c *rest.Config) (*FlowcontrolV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err @@ -60,9 +60,9 @@ func NewForConfig(c *rest.Config) (*FlowcontrolV1alpha1Client, error) { return NewForConfigAndClient(&config, httpClient) } -// NewForConfigAndClient creates a new FlowcontrolV1alpha1Client for the given config and http client. +// NewForConfigAndClient creates a new FlowcontrolV1Client for the given config and http client. // Note the http client provided takes precedence over the configured transport values. 
-func NewForConfigAndClient(c *rest.Config, h *http.Client) (*FlowcontrolV1alpha1Client, error) { +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*FlowcontrolV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err @@ -71,12 +71,12 @@ func NewForConfigAndClient(c *rest.Config, h *http.Client) (*FlowcontrolV1alpha1 if err != nil { return nil, err } - return &FlowcontrolV1alpha1Client{client}, nil + return &FlowcontrolV1Client{client}, nil } -// NewForConfigOrDie creates a new FlowcontrolV1alpha1Client for the given config and +// NewForConfigOrDie creates a new FlowcontrolV1Client for the given config and // panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *FlowcontrolV1alpha1Client { +func NewForConfigOrDie(c *rest.Config) *FlowcontrolV1Client { client, err := NewForConfig(c) if err != nil { panic(err) @@ -84,13 +84,13 @@ func NewForConfigOrDie(c *rest.Config) *FlowcontrolV1alpha1Client { return client } -// New creates a new FlowcontrolV1alpha1Client for the given RESTClient. -func New(c rest.Interface) *FlowcontrolV1alpha1Client { - return &FlowcontrolV1alpha1Client{c} +// New creates a new FlowcontrolV1Client for the given RESTClient. +func New(c rest.Interface) *FlowcontrolV1Client { + return &FlowcontrolV1Client{c} } func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion + gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() @@ -104,7 +104,7 @@ func setConfigDefaults(config *rest.Config) error { // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. -func (c *FlowcontrolV1alpha1Client) RESTClient() rest.Interface { +func (c *FlowcontrolV1Client) RESTClient() rest.Interface { if c == nil { return nil } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowschema.go similarity index 69% rename from vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowschema.go rename to vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowschema.go index 95baf82519..bd36c5e6a4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowschema.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowschema.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -package v1alpha1 +package v1 import ( "context" @@ -24,11 +24,11 @@ import ( "fmt" "time" - v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/flowcontrol/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - flowcontrolv1alpha1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1" + flowcontrolv1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1" scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -41,17 +41,17 @@ type FlowSchemasGetter interface { // FlowSchemaInterface has methods to work with FlowSchema resources. 
type FlowSchemaInterface interface { - Create(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.CreateOptions) (*v1alpha1.FlowSchema, error) - Update(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.UpdateOptions) (*v1alpha1.FlowSchema, error) - UpdateStatus(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.UpdateOptions) (*v1alpha1.FlowSchema, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.FlowSchema, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.FlowSchemaList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.FlowSchema, err error) - Apply(ctx context.Context, flowSchema *flowcontrolv1alpha1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.FlowSchema, err error) - ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1alpha1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.FlowSchema, err error) + Create(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.CreateOptions) (*v1.FlowSchema, error) + Update(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (*v1.FlowSchema, error) + UpdateStatus(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (*v1.FlowSchema, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.FlowSchema, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.FlowSchemaList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.FlowSchema, err error) + Apply(ctx context.Context, flowSchema *flowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FlowSchema, err error) + ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FlowSchema, err error) FlowSchemaExpansion } @@ -61,15 +61,15 @@ type flowSchemas struct { } // newFlowSchemas returns a FlowSchemas -func newFlowSchemas(c *FlowcontrolV1alpha1Client) *flowSchemas { +func newFlowSchemas(c *FlowcontrolV1Client) *flowSchemas { return &flowSchemas{ client: c.RESTClient(), } } // Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. -func (c *flowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.FlowSchema, err error) { - result = &v1alpha1.FlowSchema{} +func (c *flowSchemas) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.FlowSchema, err error) { + result = &v1.FlowSchema{} err = c.client.Get(). Resource("flowschemas"). Name(name). @@ -80,12 +80,12 @@ func (c *flowSchemas) Get(ctx context.Context, name string, options v1.GetOption } // List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. 
-func (c *flowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.FlowSchemaList, err error) { +func (c *flowSchemas) List(ctx context.Context, opts metav1.ListOptions) (result *v1.FlowSchemaList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second } - result = &v1alpha1.FlowSchemaList{} + result = &v1.FlowSchemaList{} err = c.client.Get(). Resource("flowschemas"). VersionedParams(&opts, scheme.ParameterCodec). @@ -96,7 +96,7 @@ func (c *flowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1 } // Watch returns a watch.Interface that watches the requested flowSchemas. -func (c *flowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *flowSchemas) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -110,8 +110,8 @@ func (c *flowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Int } // Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *flowSchemas) Create(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.CreateOptions) (result *v1alpha1.FlowSchema, err error) { - result = &v1alpha1.FlowSchema{} +func (c *flowSchemas) Create(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.CreateOptions) (result *v1.FlowSchema, err error) { + result = &v1.FlowSchema{} err = c.client.Post(). Resource("flowschemas"). VersionedParams(&opts, scheme.ParameterCodec). @@ -122,8 +122,8 @@ func (c *flowSchemas) Create(ctx context.Context, flowSchema *v1alpha1.FlowSchem } // Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *flowSchemas) Update(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.UpdateOptions) (result *v1alpha1.FlowSchema, err error) { - result = &v1alpha1.FlowSchema{} +func (c *flowSchemas) Update(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (result *v1.FlowSchema, err error) { + result = &v1.FlowSchema{} err = c.client.Put(). Resource("flowschemas"). Name(flowSchema.Name). @@ -136,8 +136,8 @@ func (c *flowSchemas) Update(ctx context.Context, flowSchema *v1alpha1.FlowSchem // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *flowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.UpdateOptions) (result *v1alpha1.FlowSchema, err error) { - result = &v1alpha1.FlowSchema{} +func (c *flowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (result *v1.FlowSchema, err error) { + result = &v1.FlowSchema{} err = c.client.Put(). Resource("flowschemas"). Name(flowSchema.Name). @@ -150,7 +150,7 @@ func (c *flowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1alpha1.Flo } // Delete takes name of the flowSchema and deletes it. Returns an error if one occurs. -func (c *flowSchemas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *flowSchemas) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { return c.client.Delete(). Resource("flowschemas"). Name(name). 
@@ -160,7 +160,7 @@ func (c *flowSchemas) Delete(ctx context.Context, name string, opts v1.DeleteOpt } // DeleteCollection deletes a collection of objects. -func (c *flowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *flowSchemas) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { var timeout time.Duration if listOpts.TimeoutSeconds != nil { timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second @@ -175,8 +175,8 @@ func (c *flowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOption } // Patch applies the patch and returns the patched flowSchema. -func (c *flowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.FlowSchema, err error) { - result = &v1alpha1.FlowSchema{} +func (c *flowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.FlowSchema, err error) { + result = &v1.FlowSchema{} err = c.client.Patch(pt). Resource("flowschemas"). Name(name). @@ -189,7 +189,7 @@ func (c *flowSchemas) Patch(ctx context.Context, name string, pt types.PatchType } // Apply takes the given apply declarative configuration, applies it and returns the applied flowSchema. -func (c *flowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1alpha1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.FlowSchema, err error) { +func (c *flowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FlowSchema, err error) { if flowSchema == nil { return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") } @@ -202,7 +202,7 @@ func (c *flowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1alpha1 if name == nil { return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") } - result = &v1alpha1.FlowSchema{} + result = &v1.FlowSchema{} err = c.client.Patch(types.ApplyPatchType). Resource("flowschemas"). Name(*name). @@ -215,7 +215,7 @@ func (c *flowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1alpha1 // ApplyStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *flowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1alpha1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.FlowSchema, err error) { +func (c *flowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FlowSchema, err error) { if flowSchema == nil { return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") } @@ -230,7 +230,7 @@ func (c *flowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1 return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") } - result = &v1alpha1.FlowSchema{} + result = &v1.FlowSchema{} err = c.client.Patch(types.ApplyPatchType). Resource("flowschemas"). Name(*name). 
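The renames above promote the flowcontrol typed client from v1alpha1 to v1, so consumers of the clientset switch both the group-version accessor and the result types. A minimal call-site sketch, assuming a standard *kubernetes.Clientset named cs and the usual context/metav1 imports (names chosen for illustration only):

	// Before this bump: cs.FlowcontrolV1alpha1().FlowSchemas()
	// After: the promoted v1 client exposes the same method set,
	// but returns types from k8s.io/api/flowcontrol/v1.
	fss, err := cs.FlowcontrolV1().FlowSchemas().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		// handle the error (e.g. log and return)
	}
	_ = fss // *v1.FlowSchemaList
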
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/generated_expansion.go similarity index 97% rename from vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/generated_expansion.go rename to vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/generated_expansion.go index 065b5e6b42..9906773887 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/generated_expansion.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -package v1alpha1 +package v1 type FlowSchemaExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/prioritylevelconfiguration.go similarity index 69% rename from vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/prioritylevelconfiguration.go rename to vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/prioritylevelconfiguration.go index 327b727c18..797fe94035 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/prioritylevelconfiguration.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -package v1alpha1 +package v1 import ( "context" @@ -24,11 +24,11 @@ import ( "fmt" "time" - v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/flowcontrol/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - flowcontrolv1alpha1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1" + flowcontrolv1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1" scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -41,17 +41,17 @@ type PriorityLevelConfigurationsGetter interface { // PriorityLevelConfigurationInterface has methods to work with PriorityLevelConfiguration resources. 
type PriorityLevelConfigurationInterface interface { - Create(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.CreateOptions) (*v1alpha1.PriorityLevelConfiguration, error) - Update(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1alpha1.PriorityLevelConfiguration, error) - UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1alpha1.PriorityLevelConfiguration, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.PriorityLevelConfiguration, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.PriorityLevelConfigurationList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PriorityLevelConfiguration, err error) - Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1alpha1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) - ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1alpha1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) + Create(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.CreateOptions) (*v1.PriorityLevelConfiguration, error) + Update(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (*v1.PriorityLevelConfiguration, error) + UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (*v1.PriorityLevelConfiguration, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PriorityLevelConfiguration, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.PriorityLevelConfigurationList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PriorityLevelConfiguration, err error) + Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityLevelConfiguration, err error) + ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityLevelConfiguration, err error) PriorityLevelConfigurationExpansion } @@ -61,15 +61,15 @@ type priorityLevelConfigurations struct { } // newPriorityLevelConfigurations returns a PriorityLevelConfigurations -func newPriorityLevelConfigurations(c *FlowcontrolV1alpha1Client) *priorityLevelConfigurations { +func newPriorityLevelConfigurations(c *FlowcontrolV1Client) *priorityLevelConfigurations { return &priorityLevelConfigurations{ client: c.RESTClient(), } } // Get takes name of the 
priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. -func (c *priorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { - result = &v1alpha1.PriorityLevelConfiguration{} +func (c *priorityLevelConfigurations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PriorityLevelConfiguration, err error) { + result = &v1.PriorityLevelConfiguration{} err = c.client.Get(). Resource("prioritylevelconfigurations"). Name(name). @@ -80,12 +80,12 @@ func (c *priorityLevelConfigurations) Get(ctx context.Context, name string, opti } // List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. -func (c *priorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PriorityLevelConfigurationList, err error) { +func (c *priorityLevelConfigurations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PriorityLevelConfigurationList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second } - result = &v1alpha1.PriorityLevelConfigurationList{} + result = &v1.PriorityLevelConfigurationList{} err = c.client.Get(). Resource("prioritylevelconfigurations"). VersionedParams(&opts, scheme.ParameterCodec). @@ -96,7 +96,7 @@ func (c *priorityLevelConfigurations) List(ctx context.Context, opts v1.ListOpti } // Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. -func (c *priorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *priorityLevelConfigurations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -110,8 +110,8 @@ func (c *priorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOpt } // Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *priorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { - result = &v1alpha1.PriorityLevelConfiguration{} +func (c *priorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.CreateOptions) (result *v1.PriorityLevelConfiguration, err error) { + result = &v1.PriorityLevelConfiguration{} err = c.client.Post(). Resource("prioritylevelconfigurations"). VersionedParams(&opts, scheme.ParameterCodec). @@ -122,8 +122,8 @@ func (c *priorityLevelConfigurations) Create(ctx context.Context, priorityLevelC } // Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. 
-func (c *priorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { - result = &v1alpha1.PriorityLevelConfiguration{} +func (c *priorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (result *v1.PriorityLevelConfiguration, err error) { + result = &v1.PriorityLevelConfiguration{} err = c.client.Put(). Resource("prioritylevelconfigurations"). Name(priorityLevelConfiguration.Name). @@ -136,8 +136,8 @@ func (c *priorityLevelConfigurations) Update(ctx context.Context, priorityLevelC // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *priorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { - result = &v1alpha1.PriorityLevelConfiguration{} +func (c *priorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (result *v1.PriorityLevelConfiguration, err error) { + result = &v1.PriorityLevelConfiguration{} err = c.client.Put(). Resource("prioritylevelconfigurations"). Name(priorityLevelConfiguration.Name). @@ -150,7 +150,7 @@ func (c *priorityLevelConfigurations) UpdateStatus(ctx context.Context, priority } // Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs. -func (c *priorityLevelConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *priorityLevelConfigurations) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { return c.client.Delete(). Resource("prioritylevelconfigurations"). Name(name). @@ -160,7 +160,7 @@ func (c *priorityLevelConfigurations) Delete(ctx context.Context, name string, o } // DeleteCollection deletes a collection of objects. -func (c *priorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *priorityLevelConfigurations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { var timeout time.Duration if listOpts.TimeoutSeconds != nil { timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second @@ -175,8 +175,8 @@ func (c *priorityLevelConfigurations) DeleteCollection(ctx context.Context, opts } // Patch applies the patch and returns the patched priorityLevelConfiguration. -func (c *priorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PriorityLevelConfiguration, err error) { - result = &v1alpha1.PriorityLevelConfiguration{} +func (c *priorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PriorityLevelConfiguration, err error) { + result = &v1.PriorityLevelConfiguration{} err = c.client.Patch(pt). Resource("prioritylevelconfigurations"). Name(name). 
@@ -189,7 +189,7 @@ func (c *priorityLevelConfigurations) Patch(ctx context.Context, name string, pt } // Apply takes the given apply declarative configuration, applies it and returns the applied priorityLevelConfiguration. -func (c *priorityLevelConfigurations) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1alpha1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { +func (c *priorityLevelConfigurations) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityLevelConfiguration, err error) { if priorityLevelConfiguration == nil { return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") } @@ -202,7 +202,7 @@ func (c *priorityLevelConfigurations) Apply(ctx context.Context, priorityLevelCo if name == nil { return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") } - result = &v1alpha1.PriorityLevelConfiguration{} + result = &v1.PriorityLevelConfiguration{} err = c.client.Patch(types.ApplyPatchType). Resource("prioritylevelconfigurations"). Name(*name). @@ -215,7 +215,7 @@ func (c *priorityLevelConfigurations) Apply(ctx context.Context, priorityLevelCo // ApplyStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *priorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1alpha1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { +func (c *priorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityLevelConfiguration, err error) { if priorityLevelConfiguration == nil { return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") } @@ -230,7 +230,7 @@ func (c *priorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityL return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") } - result = &v1alpha1.PriorityLevelConfiguration{} + result = &v1.PriorityLevelConfiguration{} err = c.client.Patch(types.ApplyPatchType). Resource("prioritylevelconfigurations"). Name(*name). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/clustercidr.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/clustercidr.go deleted file mode 100644 index 9df76351db..0000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/clustercidr.go +++ /dev/null @@ -1,197 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package v1alpha1 - -import ( - "context" - json "encoding/json" - "fmt" - "time" - - v1alpha1 "k8s.io/api/networking/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// ClusterCIDRsGetter has a method to return a ClusterCIDRInterface. -// A group's client should implement this interface. -type ClusterCIDRsGetter interface { - ClusterCIDRs() ClusterCIDRInterface -} - -// ClusterCIDRInterface has methods to work with ClusterCIDR resources. -type ClusterCIDRInterface interface { - Create(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.CreateOptions) (*v1alpha1.ClusterCIDR, error) - Update(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.UpdateOptions) (*v1alpha1.ClusterCIDR, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ClusterCIDR, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClusterCIDRList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterCIDR, err error) - Apply(ctx context.Context, clusterCIDR *networkingv1alpha1.ClusterCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterCIDR, err error) - ClusterCIDRExpansion -} - -// clusterCIDRs implements ClusterCIDRInterface -type clusterCIDRs struct { - client rest.Interface -} - -// newClusterCIDRs returns a ClusterCIDRs -func newClusterCIDRs(c *NetworkingV1alpha1Client) *clusterCIDRs { - return &clusterCIDRs{ - client: c.RESTClient(), - } -} - -// Get takes name of the clusterCIDR, and returns the corresponding clusterCIDR object, and an error if there is any. -func (c *clusterCIDRs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterCIDR, err error) { - result = &v1alpha1.ClusterCIDR{} - err = c.client.Get(). - Resource("clustercidrs"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterCIDRs that match those selectors. -func (c *clusterCIDRs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterCIDRList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.ClusterCIDRList{} - err = c.client.Get(). - Resource("clustercidrs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterCIDRs. -func (c *clusterCIDRs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("clustercidrs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a clusterCIDR and creates it. 
Returns the server's representation of the clusterCIDR, and an error, if there is any. -func (c *clusterCIDRs) Create(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.CreateOptions) (result *v1alpha1.ClusterCIDR, err error) { - result = &v1alpha1.ClusterCIDR{} - err = c.client.Post(). - Resource("clustercidrs"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterCIDR). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a clusterCIDR and updates it. Returns the server's representation of the clusterCIDR, and an error, if there is any. -func (c *clusterCIDRs) Update(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.UpdateOptions) (result *v1alpha1.ClusterCIDR, err error) { - result = &v1alpha1.ClusterCIDR{} - err = c.client.Put(). - Resource("clustercidrs"). - Name(clusterCIDR.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterCIDR). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the clusterCIDR and deletes it. Returns an error if one occurs. -func (c *clusterCIDRs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("clustercidrs"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterCIDRs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("clustercidrs"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched clusterCIDR. -func (c *clusterCIDRs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterCIDR, err error) { - result = &v1alpha1.ClusterCIDR{} - err = c.client.Patch(pt). - Resource("clustercidrs"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterCIDR. -func (c *clusterCIDRs) Apply(ctx context.Context, clusterCIDR *networkingv1alpha1.ClusterCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterCIDR, err error) { - if clusterCIDR == nil { - return nil, fmt.Errorf("clusterCIDR provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(clusterCIDR) - if err != nil { - return nil, err - } - name := clusterCIDR.Name - if name == nil { - return nil, fmt.Errorf("clusterCIDR.Name must be provided to Apply") - } - result = &v1alpha1.ClusterCIDR{} - err = c.client.Patch(types.ApplyPatchType). - Resource("clustercidrs"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_clustercidr.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_clustercidr.go deleted file mode 100644 index 592e9fc63d..0000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_clustercidr.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1alpha1 "k8s.io/api/networking/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1" - testing "k8s.io/client-go/testing" -) - -// FakeClusterCIDRs implements ClusterCIDRInterface -type FakeClusterCIDRs struct { - Fake *FakeNetworkingV1alpha1 -} - -var clustercidrsResource = v1alpha1.SchemeGroupVersion.WithResource("clustercidrs") - -var clustercidrsKind = v1alpha1.SchemeGroupVersion.WithKind("ClusterCIDR") - -// Get takes name of the clusterCIDR, and returns the corresponding clusterCIDR object, and an error if there is any. -func (c *FakeClusterCIDRs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterCIDR, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(clustercidrsResource, name), &v1alpha1.ClusterCIDR{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ClusterCIDR), err -} - -// List takes label and field selectors, and returns the list of ClusterCIDRs that match those selectors. -func (c *FakeClusterCIDRs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterCIDRList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(clustercidrsResource, clustercidrsKind, opts), &v1alpha1.ClusterCIDRList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.ClusterCIDRList{ListMeta: obj.(*v1alpha1.ClusterCIDRList).ListMeta} - for _, item := range obj.(*v1alpha1.ClusterCIDRList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested clusterCIDRs. -func (c *FakeClusterCIDRs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(clustercidrsResource, opts)) -} - -// Create takes the representation of a clusterCIDR and creates it. Returns the server's representation of the clusterCIDR, and an error, if there is any. -func (c *FakeClusterCIDRs) Create(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.CreateOptions) (result *v1alpha1.ClusterCIDR, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(clustercidrsResource, clusterCIDR), &v1alpha1.ClusterCIDR{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ClusterCIDR), err -} - -// Update takes the representation of a clusterCIDR and updates it. Returns the server's representation of the clusterCIDR, and an error, if there is any. 
-func (c *FakeClusterCIDRs) Update(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.UpdateOptions) (result *v1alpha1.ClusterCIDR, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(clustercidrsResource, clusterCIDR), &v1alpha1.ClusterCIDR{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ClusterCIDR), err -} - -// Delete takes name of the clusterCIDR and deletes it. Returns an error if one occurs. -func (c *FakeClusterCIDRs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(clustercidrsResource, name, opts), &v1alpha1.ClusterCIDR{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeClusterCIDRs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(clustercidrsResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.ClusterCIDRList{}) - return err -} - -// Patch applies the patch and returns the patched clusterCIDR. -func (c *FakeClusterCIDRs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterCIDR, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clustercidrsResource, name, pt, data, subresources...), &v1alpha1.ClusterCIDR{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ClusterCIDR), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterCIDR. -func (c *FakeClusterCIDRs) Apply(ctx context.Context, clusterCIDR *networkingv1alpha1.ClusterCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterCIDR, err error) { - if clusterCIDR == nil { - return nil, fmt.Errorf("clusterCIDR provided to Apply must not be nil") - } - data, err := json.Marshal(clusterCIDR) - if err != nil { - return nil, err - } - name := clusterCIDR.Name - if name == nil { - return nil, fmt.Errorf("clusterCIDR.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clustercidrsResource, *name, types.ApplyPatchType, data), &v1alpha1.ClusterCIDR{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ClusterCIDR), err -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_networking_client.go index 2d063836b5..80ad184bbf 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_networking_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_networking_client.go @@ -28,14 +28,14 @@ type FakeNetworkingV1alpha1 struct { *testing.Fake } -func (c *FakeNetworkingV1alpha1) ClusterCIDRs() v1alpha1.ClusterCIDRInterface { - return &FakeClusterCIDRs{c} -} - func (c *FakeNetworkingV1alpha1) IPAddresses() v1alpha1.IPAddressInterface { return &FakeIPAddresses{c} } +func (c *FakeNetworkingV1alpha1) ServiceCIDRs() v1alpha1.ServiceCIDRInterface { + return &FakeServiceCIDRs{c} +} + // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. 
func (c *FakeNetworkingV1alpha1) RESTClient() rest.Interface { diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_servicecidr.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_servicecidr.go new file mode 100644 index 0000000000..653ef631af --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_servicecidr.go @@ -0,0 +1,178 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + v1alpha1 "k8s.io/api/networking/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1" + testing "k8s.io/client-go/testing" +) + +// FakeServiceCIDRs implements ServiceCIDRInterface +type FakeServiceCIDRs struct { + Fake *FakeNetworkingV1alpha1 +} + +var servicecidrsResource = v1alpha1.SchemeGroupVersion.WithResource("servicecidrs") + +var servicecidrsKind = v1alpha1.SchemeGroupVersion.WithKind("ServiceCIDR") + +// Get takes name of the serviceCIDR, and returns the corresponding serviceCIDR object, and an error if there is any. +func (c *FakeServiceCIDRs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ServiceCIDR, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(servicecidrsResource, name), &v1alpha1.ServiceCIDR{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ServiceCIDR), err +} + +// List takes label and field selectors, and returns the list of ServiceCIDRs that match those selectors. +func (c *FakeServiceCIDRs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ServiceCIDRList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(servicecidrsResource, servicecidrsKind, opts), &v1alpha1.ServiceCIDRList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.ServiceCIDRList{ListMeta: obj.(*v1alpha1.ServiceCIDRList).ListMeta} + for _, item := range obj.(*v1alpha1.ServiceCIDRList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested serviceCIDRs. +func (c *FakeServiceCIDRs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(servicecidrsResource, opts)) +} + +// Create takes the representation of a serviceCIDR and creates it. Returns the server's representation of the serviceCIDR, and an error, if there is any. 
+func (c *FakeServiceCIDRs) Create(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.CreateOptions) (result *v1alpha1.ServiceCIDR, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(servicecidrsResource, serviceCIDR), &v1alpha1.ServiceCIDR{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ServiceCIDR), err +} + +// Update takes the representation of a serviceCIDR and updates it. Returns the server's representation of the serviceCIDR, and an error, if there is any. +func (c *FakeServiceCIDRs) Update(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (result *v1alpha1.ServiceCIDR, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(servicecidrsResource, serviceCIDR), &v1alpha1.ServiceCIDR{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ServiceCIDR), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeServiceCIDRs) UpdateStatus(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (*v1alpha1.ServiceCIDR, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(servicecidrsResource, "status", serviceCIDR), &v1alpha1.ServiceCIDR{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ServiceCIDR), err +} + +// Delete takes name of the serviceCIDR and deletes it. Returns an error if one occurs. +func (c *FakeServiceCIDRs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(servicecidrsResource, name, opts), &v1alpha1.ServiceCIDR{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeServiceCIDRs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(servicecidrsResource, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.ServiceCIDRList{}) + return err +} + +// Patch applies the patch and returns the patched serviceCIDR. +func (c *FakeServiceCIDRs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ServiceCIDR, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(servicecidrsResource, name, pt, data, subresources...), &v1alpha1.ServiceCIDR{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ServiceCIDR), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied serviceCIDR. +func (c *FakeServiceCIDRs) Apply(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error) { + if serviceCIDR == nil { + return nil, fmt.Errorf("serviceCIDR provided to Apply must not be nil") + } + data, err := json.Marshal(serviceCIDR) + if err != nil { + return nil, err + } + name := serviceCIDR.Name + if name == nil { + return nil, fmt.Errorf("serviceCIDR.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(servicecidrsResource, *name, types.ApplyPatchType, data), &v1alpha1.ServiceCIDR{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ServiceCIDR), err +} + +// ApplyStatus was generated because the type contains a Status member. 
+// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *FakeServiceCIDRs) ApplyStatus(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error) { + if serviceCIDR == nil { + return nil, fmt.Errorf("serviceCIDR provided to Apply must not be nil") + } + data, err := json.Marshal(serviceCIDR) + if err != nil { + return nil, err + } + name := serviceCIDR.Name + if name == nil { + return nil, fmt.Errorf("serviceCIDR.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(servicecidrsResource, *name, types.ApplyPatchType, data, "status"), &v1alpha1.ServiceCIDR{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ServiceCIDR), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go index 9c2979d6c4..df12a463da 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go @@ -18,6 +18,6 @@ limitations under the License. package v1alpha1 -type ClusterCIDRExpansion interface{} - type IPAddressExpansion interface{} + +type ServiceCIDRExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go index 884c846f59..c730e62468 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go @@ -28,8 +28,8 @@ import ( type NetworkingV1alpha1Interface interface { RESTClient() rest.Interface - ClusterCIDRsGetter IPAddressesGetter + ServiceCIDRsGetter } // NetworkingV1alpha1Client is used to interact with features provided by the networking.k8s.io group. @@ -37,14 +37,14 @@ type NetworkingV1alpha1Client struct { restClient rest.Interface } -func (c *NetworkingV1alpha1Client) ClusterCIDRs() ClusterCIDRInterface { - return newClusterCIDRs(c) -} - func (c *NetworkingV1alpha1Client) IPAddresses() IPAddressInterface { return newIPAddresses(c) } +func (c *NetworkingV1alpha1Client) ServiceCIDRs() ServiceCIDRInterface { + return newServiceCIDRs(c) +} + // NewForConfig creates a new NetworkingV1alpha1Client for the given config. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/servicecidr.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/servicecidr.go new file mode 100644 index 0000000000..100f290a19 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/servicecidr.go @@ -0,0 +1,243 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + v1alpha1 "k8s.io/api/networking/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ServiceCIDRsGetter has a method to return a ServiceCIDRInterface. +// A group's client should implement this interface. +type ServiceCIDRsGetter interface { + ServiceCIDRs() ServiceCIDRInterface +} + +// ServiceCIDRInterface has methods to work with ServiceCIDR resources. +type ServiceCIDRInterface interface { + Create(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.CreateOptions) (*v1alpha1.ServiceCIDR, error) + Update(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (*v1alpha1.ServiceCIDR, error) + UpdateStatus(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (*v1alpha1.ServiceCIDR, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ServiceCIDR, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ServiceCIDRList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ServiceCIDR, err error) + Apply(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error) + ApplyStatus(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error) + ServiceCIDRExpansion +} + +// serviceCIDRs implements ServiceCIDRInterface +type serviceCIDRs struct { + client rest.Interface +} + +// newServiceCIDRs returns a ServiceCIDRs +func newServiceCIDRs(c *NetworkingV1alpha1Client) *serviceCIDRs { + return &serviceCIDRs{ + client: c.RESTClient(), + } +} + +// Get takes name of the serviceCIDR, and returns the corresponding serviceCIDR object, and an error if there is any. +func (c *serviceCIDRs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ServiceCIDR, err error) { + result = &v1alpha1.ServiceCIDR{} + err = c.client.Get(). + Resource("servicecidrs"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ServiceCIDRs that match those selectors. +func (c *serviceCIDRs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ServiceCIDRList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.ServiceCIDRList{} + err = c.client.Get(). + Resource("servicecidrs"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested serviceCIDRs. 
+func (c *serviceCIDRs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("servicecidrs"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a serviceCIDR and creates it. Returns the server's representation of the serviceCIDR, and an error, if there is any. +func (c *serviceCIDRs) Create(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.CreateOptions) (result *v1alpha1.ServiceCIDR, err error) { + result = &v1alpha1.ServiceCIDR{} + err = c.client.Post(). + Resource("servicecidrs"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(serviceCIDR). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a serviceCIDR and updates it. Returns the server's representation of the serviceCIDR, and an error, if there is any. +func (c *serviceCIDRs) Update(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (result *v1alpha1.ServiceCIDR, err error) { + result = &v1alpha1.ServiceCIDR{} + err = c.client.Put(). + Resource("servicecidrs"). + Name(serviceCIDR.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(serviceCIDR). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *serviceCIDRs) UpdateStatus(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (result *v1alpha1.ServiceCIDR, err error) { + result = &v1alpha1.ServiceCIDR{} + err = c.client.Put(). + Resource("servicecidrs"). + Name(serviceCIDR.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(serviceCIDR). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the serviceCIDR and deletes it. Returns an error if one occurs. +func (c *serviceCIDRs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("servicecidrs"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *serviceCIDRs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("servicecidrs"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched serviceCIDR. +func (c *serviceCIDRs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ServiceCIDR, err error) { + result = &v1alpha1.ServiceCIDR{} + err = c.client.Patch(pt). + Resource("servicecidrs"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied serviceCIDR. 
+func (c *serviceCIDRs) Apply(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error) { + if serviceCIDR == nil { + return nil, fmt.Errorf("serviceCIDR provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(serviceCIDR) + if err != nil { + return nil, err + } + name := serviceCIDR.Name + if name == nil { + return nil, fmt.Errorf("serviceCIDR.Name must be provided to Apply") + } + result = &v1alpha1.ServiceCIDR{} + err = c.client.Patch(types.ApplyPatchType). + Resource("servicecidrs"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *serviceCIDRs) ApplyStatus(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error) { + if serviceCIDR == nil { + return nil, fmt.Errorf("serviceCIDR provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(serviceCIDR) + if err != nil { + return nil, err + } + + name := serviceCIDR.Name + if name == nil { + return nil, fmt.Errorf("serviceCIDR.Name must be provided to Apply") + } + + result = &v1alpha1.ServiceCIDR{} + err = c.client.Patch(types.ApplyPatchType). + Resource("servicecidrs"). + Name(*name). + SubResource("status"). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_podsecuritypolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_podsecuritypolicy.go deleted file mode 100644 index ade1aab7f0..0000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_podsecuritypolicy.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta1 "k8s.io/api/policy/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - policyv1beta1 "k8s.io/client-go/applyconfigurations/policy/v1beta1" - testing "k8s.io/client-go/testing" -) - -// FakePodSecurityPolicies implements PodSecurityPolicyInterface -type FakePodSecurityPolicies struct { - Fake *FakePolicyV1beta1 -} - -var podsecuritypoliciesResource = v1beta1.SchemeGroupVersion.WithResource("podsecuritypolicies") - -var podsecuritypoliciesKind = v1beta1.SchemeGroupVersion.WithKind("PodSecurityPolicy") - -// Get takes name of the podSecurityPolicy, and returns the corresponding podSecurityPolicy object, and an error if there is any. 
-func (c *FakePodSecurityPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PodSecurityPolicy, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(podsecuritypoliciesResource, name), &v1beta1.PodSecurityPolicy{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.PodSecurityPolicy), err -} - -// List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. -func (c *FakePodSecurityPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(podsecuritypoliciesResource, podsecuritypoliciesKind, opts), &v1beta1.PodSecurityPolicyList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.PodSecurityPolicyList{ListMeta: obj.(*v1beta1.PodSecurityPolicyList).ListMeta} - for _, item := range obj.(*v1beta1.PodSecurityPolicyList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested podSecurityPolicies. -func (c *FakePodSecurityPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(podsecuritypoliciesResource, opts)) -} - -// Create takes the representation of a podSecurityPolicy and creates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *FakePodSecurityPolicies) Create(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.CreateOptions) (result *v1beta1.PodSecurityPolicy, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(podsecuritypoliciesResource, podSecurityPolicy), &v1beta1.PodSecurityPolicy{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.PodSecurityPolicy), err -} - -// Update takes the representation of a podSecurityPolicy and updates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *FakePodSecurityPolicies) Update(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.UpdateOptions) (result *v1beta1.PodSecurityPolicy, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(podsecuritypoliciesResource, podSecurityPolicy), &v1beta1.PodSecurityPolicy{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.PodSecurityPolicy), err -} - -// Delete takes name of the podSecurityPolicy and deletes it. Returns an error if one occurs. -func (c *FakePodSecurityPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(podsecuritypoliciesResource, name, opts), &v1beta1.PodSecurityPolicy{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakePodSecurityPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(podsecuritypoliciesResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.PodSecurityPolicyList{}) - return err -} - -// Patch applies the patch and returns the patched podSecurityPolicy. 
-func (c *FakePodSecurityPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(podsecuritypoliciesResource, name, pt, data, subresources...), &v1beta1.PodSecurityPolicy{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.PodSecurityPolicy), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied podSecurityPolicy. -func (c *FakePodSecurityPolicies) Apply(ctx context.Context, podSecurityPolicy *policyv1beta1.PodSecurityPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodSecurityPolicy, err error) { - if podSecurityPolicy == nil { - return nil, fmt.Errorf("podSecurityPolicy provided to Apply must not be nil") - } - data, err := json.Marshal(podSecurityPolicy) - if err != nil { - return nil, err - } - name := podSecurityPolicy.Name - if name == nil { - return nil, fmt.Errorf("podSecurityPolicy.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(podsecuritypoliciesResource, *name, types.ApplyPatchType, data), &v1beta1.PodSecurityPolicy{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.PodSecurityPolicy), err -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_policy_client.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_policy_client.go index 9c780bf1f0..90670b113f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_policy_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_policy_client.go @@ -36,10 +36,6 @@ func (c *FakePolicyV1beta1) PodDisruptionBudgets(namespace string) v1beta1.PodDi return &FakePodDisruptionBudgets{c, namespace} } -func (c *FakePolicyV1beta1) PodSecurityPolicies() v1beta1.PodSecurityPolicyInterface { - return &FakePodSecurityPolicies{c} -} - // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. func (c *FakePolicyV1beta1) RESTClient() rest.Interface { diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go index 078c16d5cb..6fce70c4eb 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go @@ -19,5 +19,3 @@ limitations under the License. package v1beta1 type PodDisruptionBudgetExpansion interface{} - -type PodSecurityPolicyExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go deleted file mode 100644 index 944b61de47..0000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go +++ /dev/null @@ -1,197 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - "context" - json "encoding/json" - "fmt" - "time" - - v1beta1 "k8s.io/api/policy/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - policyv1beta1 "k8s.io/client-go/applyconfigurations/policy/v1beta1" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// PodSecurityPoliciesGetter has a method to return a PodSecurityPolicyInterface. -// A group's client should implement this interface. -type PodSecurityPoliciesGetter interface { - PodSecurityPolicies() PodSecurityPolicyInterface -} - -// PodSecurityPolicyInterface has methods to work with PodSecurityPolicy resources. -type PodSecurityPolicyInterface interface { - Create(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.CreateOptions) (*v1beta1.PodSecurityPolicy, error) - Update(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.UpdateOptions) (*v1beta1.PodSecurityPolicy, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.PodSecurityPolicy, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.PodSecurityPolicyList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) - Apply(ctx context.Context, podSecurityPolicy *policyv1beta1.PodSecurityPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodSecurityPolicy, err error) - PodSecurityPolicyExpansion -} - -// podSecurityPolicies implements PodSecurityPolicyInterface -type podSecurityPolicies struct { - client rest.Interface -} - -// newPodSecurityPolicies returns a PodSecurityPolicies -func newPodSecurityPolicies(c *PolicyV1beta1Client) *podSecurityPolicies { - return &podSecurityPolicies{ - client: c.RESTClient(), - } -} - -// Get takes name of the podSecurityPolicy, and returns the corresponding podSecurityPolicy object, and an error if there is any. -func (c *podSecurityPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Get(). - Resource("podsecuritypolicies"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. -func (c *podSecurityPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.PodSecurityPolicyList{} - err = c.client.Get(). 
- Resource("podsecuritypolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podSecurityPolicies. -func (c *podSecurityPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("podsecuritypolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a podSecurityPolicy and creates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *podSecurityPolicies) Create(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.CreateOptions) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Post(). - Resource("podsecuritypolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podSecurityPolicy). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a podSecurityPolicy and updates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *podSecurityPolicies) Update(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.UpdateOptions) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Put(). - Resource("podsecuritypolicies"). - Name(podSecurityPolicy.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podSecurityPolicy). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the podSecurityPolicy and deletes it. Returns an error if one occurs. -func (c *podSecurityPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("podsecuritypolicies"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *podSecurityPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("podsecuritypolicies"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched podSecurityPolicy. -func (c *podSecurityPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Patch(pt). - Resource("podsecuritypolicies"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied podSecurityPolicy. 
-func (c *podSecurityPolicies) Apply(ctx context.Context, podSecurityPolicy *policyv1beta1.PodSecurityPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodSecurityPolicy, err error) { - if podSecurityPolicy == nil { - return nil, fmt.Errorf("podSecurityPolicy provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(podSecurityPolicy) - if err != nil { - return nil, err - } - name := podSecurityPolicy.Name - if name == nil { - return nil, fmt.Errorf("podSecurityPolicy.Name must be provided to Apply") - } - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Patch(types.ApplyPatchType). - Resource("podsecuritypolicies"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go index 5b65c9c0aa..fdb5093216 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go @@ -30,7 +30,6 @@ type PolicyV1beta1Interface interface { RESTClient() rest.Interface EvictionsGetter PodDisruptionBudgetsGetter - PodSecurityPoliciesGetter } // PolicyV1beta1Client is used to interact with features provided by the policy group. @@ -46,10 +45,6 @@ func (c *PolicyV1beta1Client) PodDisruptionBudgets(namespace string) PodDisrupti return newPodDisruptionBudgets(c, namespace) } -func (c *PolicyV1beta1Client) PodSecurityPolicies() PodSecurityPolicyInterface { - return newPodSecurityPolicies(c) -} - // NewForConfig creates a new PolicyV1beta1Client for the given config. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_storage_client.go index c26190aa01..0e078f3486 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_storage_client.go @@ -36,6 +36,10 @@ func (c *FakeStorageV1alpha1) VolumeAttachments() v1alpha1.VolumeAttachmentInter return &FakeVolumeAttachments{c} } +func (c *FakeStorageV1alpha1) VolumeAttributesClasses() v1alpha1.VolumeAttributesClassInterface { + return &FakeVolumeAttributesClasses{c} +} + // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. func (c *FakeStorageV1alpha1) RESTClient() rest.Interface { diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattributesclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattributesclass.go new file mode 100644 index 0000000000..d25263df48 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattributesclass.go @@ -0,0 +1,145 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + v1alpha1 "k8s.io/api/storage/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + storagev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1" + testing "k8s.io/client-go/testing" +) + +// FakeVolumeAttributesClasses implements VolumeAttributesClassInterface +type FakeVolumeAttributesClasses struct { + Fake *FakeStorageV1alpha1 +} + +var volumeattributesclassesResource = v1alpha1.SchemeGroupVersion.WithResource("volumeattributesclasses") + +var volumeattributesclassesKind = v1alpha1.SchemeGroupVersion.WithKind("VolumeAttributesClass") + +// Get takes name of the volumeAttributesClass, and returns the corresponding volumeAttributesClass object, and an error if there is any. +func (c *FakeVolumeAttributesClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.VolumeAttributesClass, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(volumeattributesclassesResource, name), &v1alpha1.VolumeAttributesClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VolumeAttributesClass), err +} + +// List takes label and field selectors, and returns the list of VolumeAttributesClasses that match those selectors. +func (c *FakeVolumeAttributesClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.VolumeAttributesClassList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(volumeattributesclassesResource, volumeattributesclassesKind, opts), &v1alpha1.VolumeAttributesClassList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.VolumeAttributesClassList{ListMeta: obj.(*v1alpha1.VolumeAttributesClassList).ListMeta} + for _, item := range obj.(*v1alpha1.VolumeAttributesClassList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested volumeAttributesClasses. +func (c *FakeVolumeAttributesClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(volumeattributesclassesResource, opts)) +} + +// Create takes the representation of a volumeAttributesClass and creates it. Returns the server's representation of the volumeAttributesClass, and an error, if there is any. +func (c *FakeVolumeAttributesClasses) Create(ctx context.Context, volumeAttributesClass *v1alpha1.VolumeAttributesClass, opts v1.CreateOptions) (result *v1alpha1.VolumeAttributesClass, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootCreateAction(volumeattributesclassesResource, volumeAttributesClass), &v1alpha1.VolumeAttributesClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VolumeAttributesClass), err +} + +// Update takes the representation of a volumeAttributesClass and updates it. Returns the server's representation of the volumeAttributesClass, and an error, if there is any. +func (c *FakeVolumeAttributesClasses) Update(ctx context.Context, volumeAttributesClass *v1alpha1.VolumeAttributesClass, opts v1.UpdateOptions) (result *v1alpha1.VolumeAttributesClass, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(volumeattributesclassesResource, volumeAttributesClass), &v1alpha1.VolumeAttributesClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VolumeAttributesClass), err +} + +// Delete takes name of the volumeAttributesClass and deletes it. Returns an error if one occurs. +func (c *FakeVolumeAttributesClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(volumeattributesclassesResource, name, opts), &v1alpha1.VolumeAttributesClass{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeVolumeAttributesClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(volumeattributesclassesResource, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.VolumeAttributesClassList{}) + return err +} + +// Patch applies the patch and returns the patched volumeAttributesClass. +func (c *FakeVolumeAttributesClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttributesClass, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(volumeattributesclassesResource, name, pt, data, subresources...), &v1alpha1.VolumeAttributesClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VolumeAttributesClass), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied volumeAttributesClass. +func (c *FakeVolumeAttributesClasses) Apply(ctx context.Context, volumeAttributesClass *storagev1alpha1.VolumeAttributesClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttributesClass, err error) { + if volumeAttributesClass == nil { + return nil, fmt.Errorf("volumeAttributesClass provided to Apply must not be nil") + } + data, err := json.Marshal(volumeAttributesClass) + if err != nil { + return nil, err + } + name := volumeAttributesClass.Name + if name == nil { + return nil, fmt.Errorf("volumeAttributesClass.Name must be provided to Apply") + } + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(volumeattributesclassesResource, *name, types.ApplyPatchType, data), &v1alpha1.VolumeAttributesClass{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VolumeAttributesClass), err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go index 0f51c85f9e..436e910f24 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go @@ -21,3 +21,5 @@ package v1alpha1 type CSIStorageCapacityExpansion interface{} type VolumeAttachmentExpansion interface{} + +type VolumeAttributesClassExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go index c9bf11d766..63e3fc243f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go @@ -30,6 +30,7 @@ type StorageV1alpha1Interface interface { RESTClient() rest.Interface CSIStorageCapacitiesGetter VolumeAttachmentsGetter + VolumeAttributesClassesGetter } // StorageV1alpha1Client is used to interact with features provided by the storage.k8s.io group. @@ -45,6 +46,10 @@ func (c *StorageV1alpha1Client) VolumeAttachments() VolumeAttachmentInterface { return newVolumeAttachments(c) } +func (c *StorageV1alpha1Client) VolumeAttributesClasses() VolumeAttributesClassInterface { + return newVolumeAttributesClasses(c) +} + // NewForConfig creates a new StorageV1alpha1Client for the given config. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattributesclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattributesclass.go new file mode 100644 index 0000000000..6633a4dc15 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattributesclass.go @@ -0,0 +1,197 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + v1alpha1 "k8s.io/api/storage/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + storagev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// VolumeAttributesClassesGetter has a method to return a VolumeAttributesClassInterface. +// A group's client should implement this interface. 
+type VolumeAttributesClassesGetter interface { + VolumeAttributesClasses() VolumeAttributesClassInterface +} + +// VolumeAttributesClassInterface has methods to work with VolumeAttributesClass resources. +type VolumeAttributesClassInterface interface { + Create(ctx context.Context, volumeAttributesClass *v1alpha1.VolumeAttributesClass, opts v1.CreateOptions) (*v1alpha1.VolumeAttributesClass, error) + Update(ctx context.Context, volumeAttributesClass *v1alpha1.VolumeAttributesClass, opts v1.UpdateOptions) (*v1alpha1.VolumeAttributesClass, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.VolumeAttributesClass, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.VolumeAttributesClassList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttributesClass, err error) + Apply(ctx context.Context, volumeAttributesClass *storagev1alpha1.VolumeAttributesClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttributesClass, err error) + VolumeAttributesClassExpansion +} + +// volumeAttributesClasses implements VolumeAttributesClassInterface +type volumeAttributesClasses struct { + client rest.Interface +} + +// newVolumeAttributesClasses returns a VolumeAttributesClasses +func newVolumeAttributesClasses(c *StorageV1alpha1Client) *volumeAttributesClasses { + return &volumeAttributesClasses{ + client: c.RESTClient(), + } +} + +// Get takes name of the volumeAttributesClass, and returns the corresponding volumeAttributesClass object, and an error if there is any. +func (c *volumeAttributesClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.VolumeAttributesClass, err error) { + result = &v1alpha1.VolumeAttributesClass{} + err = c.client.Get(). + Resource("volumeattributesclasses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of VolumeAttributesClasses that match those selectors. +func (c *volumeAttributesClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.VolumeAttributesClassList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.VolumeAttributesClassList{} + err = c.client.Get(). + Resource("volumeattributesclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested volumeAttributesClasses. +func (c *volumeAttributesClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("volumeattributesclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a volumeAttributesClass and creates it. Returns the server's representation of the volumeAttributesClass, and an error, if there is any. 
+func (c *volumeAttributesClasses) Create(ctx context.Context, volumeAttributesClass *v1alpha1.VolumeAttributesClass, opts v1.CreateOptions) (result *v1alpha1.VolumeAttributesClass, err error) { + result = &v1alpha1.VolumeAttributesClass{} + err = c.client.Post(). + Resource("volumeattributesclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(volumeAttributesClass). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a volumeAttributesClass and updates it. Returns the server's representation of the volumeAttributesClass, and an error, if there is any. +func (c *volumeAttributesClasses) Update(ctx context.Context, volumeAttributesClass *v1alpha1.VolumeAttributesClass, opts v1.UpdateOptions) (result *v1alpha1.VolumeAttributesClass, err error) { + result = &v1alpha1.VolumeAttributesClass{} + err = c.client.Put(). + Resource("volumeattributesclasses"). + Name(volumeAttributesClass.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(volumeAttributesClass). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the volumeAttributesClass and deletes it. Returns an error if one occurs. +func (c *volumeAttributesClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("volumeattributesclasses"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *volumeAttributesClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("volumeattributesclasses"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched volumeAttributesClass. +func (c *volumeAttributesClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttributesClass, err error) { + result = &v1alpha1.VolumeAttributesClass{} + err = c.client.Patch(pt). + Resource("volumeattributesclasses"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied volumeAttributesClass. +func (c *volumeAttributesClasses) Apply(ctx context.Context, volumeAttributesClass *storagev1alpha1.VolumeAttributesClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttributesClass, err error) { + if volumeAttributesClass == nil { + return nil, fmt.Errorf("volumeAttributesClass provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(volumeAttributesClass) + if err != nil { + return nil, err + } + name := volumeAttributesClass.Name + if name == nil { + return nil, fmt.Errorf("volumeAttributesClass.Name must be provided to Apply") + } + result = &v1alpha1.VolumeAttributesClass{} + err = c.client.Patch(types.ApplyPatchType). + Resource("volumeattributesclasses"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/client-go/restmapper/shortcut.go b/vendor/k8s.io/client-go/restmapper/shortcut.go index 7ab3cd46fe..ca517a01d4 100644 --- a/vendor/k8s.io/client-go/restmapper/shortcut.go +++ b/vendor/k8s.io/client-go/restmapper/shortcut.go @@ -17,6 +17,7 @@ limitations under the License. package restmapper import ( + "fmt" "strings" "k8s.io/klog/v2" @@ -32,13 +33,15 @@ type shortcutExpander struct { RESTMapper meta.RESTMapper discoveryClient discovery.DiscoveryInterface + + warningHandler func(string) } var _ meta.ResettableRESTMapper = shortcutExpander{} // NewShortcutExpander wraps a restmapper in a layer that expands shortcuts found via discovery -func NewShortcutExpander(delegate meta.RESTMapper, client discovery.DiscoveryInterface) meta.RESTMapper { - return shortcutExpander{RESTMapper: delegate, discoveryClient: client} +func NewShortcutExpander(delegate meta.RESTMapper, client discovery.DiscoveryInterface, warningHandler func(string)) meta.RESTMapper { + return shortcutExpander{RESTMapper: delegate, discoveryClient: client, warningHandler: warningHandler} } // KindFor fulfills meta.RESTMapper @@ -145,16 +148,37 @@ func (e shortcutExpander) expandResourceShortcut(resource schema.GroupVersionRes } } + found := false + var rsc schema.GroupVersionResource + warnedAmbiguousShortcut := make(map[schema.GroupResource]bool) for _, item := range shortcutResources { if len(resource.Group) != 0 && resource.Group != item.ShortForm.Group { continue } if resource.Resource == item.ShortForm.Resource { - resource.Resource = item.LongForm.Resource - resource.Group = item.LongForm.Group - return resource + if found { + if item.LongForm.Group == rsc.Group && item.LongForm.Resource == rsc.Resource { + // It is common and acceptable that group/resource has multiple + // versions registered in cluster. This does not introduce ambiguity + // in terms of shortname usage. + continue + } + if !warnedAmbiguousShortcut[item.LongForm] { + if e.warningHandler != nil { + e.warningHandler(fmt.Sprintf("short name %q could also match lower priority resource %s", resource.Resource, item.LongForm.String())) + } + warnedAmbiguousShortcut[item.LongForm] = true + } + continue + } + rsc.Resource = item.LongForm.Resource + rsc.Group = item.LongForm.Group + found = true } } + if found { + return rsc + } // we didn't find exact match so match on group prefixing. This allows autoscal to match autoscaling if len(resource.Group) == 0 { diff --git a/vendor/k8s.io/client-go/tools/cache/reflector.go b/vendor/k8s.io/client-go/tools/cache/reflector.go index 45eaff5285..c1ea13de57 100644 --- a/vendor/k8s.io/client-go/tools/cache/reflector.go +++ b/vendor/k8s.io/client-go/tools/cache/reflector.go @@ -334,12 +334,9 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { return nil } if err != nil { - if !apierrors.IsInvalid(err) { - return err - } - klog.Warning("the watch-list feature is not supported by the server, falling back to the previous LIST/WATCH semantic") + klog.Warningf("The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = %v", err) fallbackToList = true - // Ensure that we won't accidentally pass some garbage down the watch. + // ensure that we won't accidentally pass some garbage down the watch. 
w = nil } } @@ -351,6 +348,8 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { } } + klog.V(2).Infof("Caches populated for %v from %s", r.typeDescription, r.name) + resyncerrc := make(chan error, 1) cancelCh := make(chan struct{}) defer close(cancelCh) @@ -395,6 +394,11 @@ func (r *Reflector) watch(w watch.Interface, stopCh <-chan struct{}, resyncerrc // give the stopCh a chance to stop the loop, even in case of continue statements further down on errors select { case <-stopCh: + // we can only end up here when the stopCh + // was closed after a successful watchlist or list request + if w != nil { + w.Stop() + } return nil default: } @@ -670,6 +674,12 @@ func (r *Reflector) watchList(stopCh <-chan struct{}) (watch.Interface, error) { // "k8s.io/initial-events-end" bookmark. initTrace.Step("Objects streamed", trace.Field{Key: "count", Value: len(temporaryStore.List())}) r.setIsLastSyncResourceVersionUnavailable(false) + + // we utilize the temporaryStore to ensure independence from the current store implementation. + // as of today, the store is implemented as a queue and will be drained by the higher-level + // component as soon as it finishes replacing the content. + checkWatchListConsistencyIfRequested(stopCh, r.name, resourceVersion, r.listerWatcher, temporaryStore) + if err = r.store.Replace(temporaryStore.List(), resourceVersion); err != nil { return nil, fmt.Errorf("unable to sync watch-list result: %v", err) } @@ -762,7 +772,7 @@ loop: } case watch.Bookmark: // A `Bookmark` means watch has synced here, just update the resourceVersion - if _, ok := meta.GetAnnotations()["k8s.io/initial-events-end"]; ok { + if meta.GetAnnotations()["k8s.io/initial-events-end"] == "true" { if exitOnInitialEventsEndBookmark != nil { *exitOnInitialEventsEndBookmark = true } diff --git a/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go b/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go new file mode 100644 index 0000000000..aa3027d714 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go @@ -0,0 +1,119 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "context" + "os" + "sort" + "strconv" + "time" + + "github.com/google/go-cmp/cmp" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog/v2" +) + +var dataConsistencyDetectionEnabled = false + +func init() { + dataConsistencyDetectionEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_WATCHLIST_INCONSISTENCY_DETECTOR")) +} + +// checkWatchListConsistencyIfRequested performs a data consistency check only when +// the KUBE_WATCHLIST_INCONSISTENCY_DETECTOR environment variable was set during a binary startup. +// +// The consistency check is meant to be enforced only in the CI, not in production. 
+// The check ensures that data retrieved by the watch-list api call +// is exactly the same as data received by the standard list api call. +// +// Note that this function will panic when data inconsistency is detected. +// This is intentional because we want to catch it in the CI. +func checkWatchListConsistencyIfRequested(stopCh <-chan struct{}, identity string, lastSyncedResourceVersion string, listerWatcher Lister, store Store) { + if !dataConsistencyDetectionEnabled { + return + } + checkWatchListConsistency(stopCh, identity, lastSyncedResourceVersion, listerWatcher, store) +} + +// checkWatchListConsistency exists solely for testing purposes. +// we cannot use checkWatchListConsistencyIfRequested because +// it is guarded by an environmental variable. +// we cannot manipulate the environmental variable because +// it will affect other tests in this package. +func checkWatchListConsistency(stopCh <-chan struct{}, identity string, lastSyncedResourceVersion string, listerWatcher Lister, store Store) { + klog.Warningf("%s: data consistency check for the watch-list feature is enabled, this will result in an additional call to the API server.", identity) + opts := metav1.ListOptions{ + ResourceVersion: lastSyncedResourceVersion, + ResourceVersionMatch: metav1.ResourceVersionMatchExact, + } + var list runtime.Object + err := wait.PollUntilContextCancel(wait.ContextForChannel(stopCh), time.Second, true, func(_ context.Context) (done bool, err error) { + list, err = listerWatcher.List(opts) + if err != nil { + // the consistency check will only be enabled in the CI + // and LIST calls in general will be retired by the client-go library + // if we fail simply log and retry + klog.Errorf("failed to list data from the server, retrying until stopCh is closed, err: %v", err) + return false, nil + } + return true, nil + }) + if err != nil { + klog.Errorf("failed to list data from the server, the watch-list consistency check won't be performed, stopCh was closed, err: %v", err) + return + } + + rawListItems, err := meta.ExtractListWithAlloc(list) + if err != nil { + panic(err) // this should never happen + } + + listItems := toMetaObjectSliceOrDie(rawListItems) + storeItems := toMetaObjectSliceOrDie(store.List()) + + sort.Sort(byUID(listItems)) + sort.Sort(byUID(storeItems)) + + if !cmp.Equal(listItems, storeItems) { + klog.Infof("%s: data received by the new watch-list api call is different than received by the standard list api call, diff: %v", identity, cmp.Diff(listItems, storeItems)) + msg := "data inconsistency detected for the watch-list feature, panicking!" 
+ panic(msg) + } +} + +type byUID []metav1.Object + +func (a byUID) Len() int { return len(a) } +func (a byUID) Less(i, j int) bool { return a[i].GetUID() < a[j].GetUID() } +func (a byUID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +func toMetaObjectSliceOrDie[T any](s []T) []metav1.Object { + result := make([]metav1.Object, len(s)) + for i, v := range s { + m, err := meta.Accessor(v) + if err != nil { + panic(err) + } + result[i] = m + } + return result +} diff --git a/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/vendor/k8s.io/client-go/tools/cache/shared_informer.go index be8694ddb6..b3f37431d5 100644 --- a/vendor/k8s.io/client-go/tools/cache/shared_informer.go +++ b/vendor/k8s.io/client-go/tools/cache/shared_informer.go @@ -334,11 +334,9 @@ func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool }, stopCh) if err != nil { - klog.V(2).Infof("stop requested") return false } - klog.V(4).Infof("caches populated") return true } diff --git a/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go b/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go index 10744156b8..0fc2fd0a0c 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go @@ -49,12 +49,12 @@ type InClusterConfig interface { Possible() bool } -// NewNonInteractiveDeferredLoadingClientConfig creates a ConfigClientClientConfig using the passed context name +// NewNonInteractiveDeferredLoadingClientConfig creates a ClientConfig using the passed context name func NewNonInteractiveDeferredLoadingClientConfig(loader ClientConfigLoader, overrides *ConfigOverrides) ClientConfig { return &DeferredLoadingClientConfig{loader: loader, overrides: overrides, icc: &inClusterClientConfig{overrides: overrides}} } -// NewInteractiveDeferredLoadingClientConfig creates a ConfigClientClientConfig using the passed context name and the fallback auth reader +// NewInteractiveDeferredLoadingClientConfig creates a ClientConfig using the passed context name and the fallback auth reader func NewInteractiveDeferredLoadingClientConfig(loader ClientConfigLoader, overrides *ConfigOverrides, fallbackReader io.Reader) ClientConfig { return &DeferredLoadingClientConfig{loader: loader, overrides: overrides, icc: &inClusterClientConfig{overrides: overrides}, fallbackReader: fallbackReader} } diff --git a/vendor/k8s.io/client-go/tools/remotecommand/fallback.go b/vendor/k8s.io/client-go/tools/remotecommand/fallback.go new file mode 100644 index 0000000000..4846cdb550 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/remotecommand/fallback.go @@ -0,0 +1,57 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package remotecommand + +import ( + "context" +) + +var _ Executor = &fallbackExecutor{} + +type fallbackExecutor struct { + primary Executor + secondary Executor + shouldFallback func(error) bool +} + +// NewFallbackExecutor creates an Executor that first attempts to use the +// WebSocketExecutor, falling back to the legacy SPDYExecutor if the initial +// websocket "StreamWithContext" call fails. +// func NewFallbackExecutor(config *restclient.Config, method string, url *url.URL) (Executor, error) { +func NewFallbackExecutor(primary, secondary Executor, shouldFallback func(error) bool) (Executor, error) { + return &fallbackExecutor{ + primary: primary, + secondary: secondary, + shouldFallback: shouldFallback, + }, nil +} + +// Stream is deprecated. Please use "StreamWithContext". +func (f *fallbackExecutor) Stream(options StreamOptions) error { + return f.StreamWithContext(context.Background(), options) +} + +// StreamWithContext initially attempts to call "StreamWithContext" using the +// primary executor, falling back to calling the secondary executor if the +// initial primary call to upgrade to a websocket connection fails. +func (f *fallbackExecutor) StreamWithContext(ctx context.Context, options StreamOptions) error { + err := f.primary.StreamWithContext(ctx, options) + if f.shouldFallback(err) { + return f.secondary.StreamWithContext(ctx, options) + } + return err +} diff --git a/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go b/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go index 662a3cb4ac..1ae67729be 100644 --- a/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go +++ b/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go @@ -18,17 +18,10 @@ package remotecommand import ( "context" - "fmt" "io" "net/http" - "net/url" - - "k8s.io/klog/v2" "k8s.io/apimachinery/pkg/util/httpstream" - "k8s.io/apimachinery/pkg/util/remotecommand" - restclient "k8s.io/client-go/rest" - "k8s.io/client-go/transport/spdy" ) // StreamOptions holds information pertaining to the current streaming session: @@ -63,120 +56,3 @@ type streamCreator interface { type streamProtocolHandler interface { stream(conn streamCreator) error } - -// streamExecutor handles transporting standard shell streams over an httpstream connection. -type streamExecutor struct { - upgrader spdy.Upgrader - transport http.RoundTripper - - method string - url *url.URL - protocols []string -} - -// NewSPDYExecutor connects to the provided server and upgrades the connection to -// multiplexed bidirectional streams. -func NewSPDYExecutor(config *restclient.Config, method string, url *url.URL) (Executor, error) { - wrapper, upgradeRoundTripper, err := spdy.RoundTripperFor(config) - if err != nil { - return nil, err - } - return NewSPDYExecutorForTransports(wrapper, upgradeRoundTripper, method, url) -} - -// NewSPDYExecutorForTransports connects to the provided server using the given transport, -// upgrades the response using the given upgrader to multiplexed bidirectional streams. 
-func NewSPDYExecutorForTransports(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL) (Executor, error) { - return NewSPDYExecutorForProtocols( - transport, upgrader, method, url, - remotecommand.StreamProtocolV4Name, - remotecommand.StreamProtocolV3Name, - remotecommand.StreamProtocolV2Name, - remotecommand.StreamProtocolV1Name, - ) -} - -// NewSPDYExecutorForProtocols connects to the provided server and upgrades the connection to -// multiplexed bidirectional streams using only the provided protocols. Exposed for testing, most -// callers should use NewSPDYExecutor or NewSPDYExecutorForTransports. -func NewSPDYExecutorForProtocols(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL, protocols ...string) (Executor, error) { - return &streamExecutor{ - upgrader: upgrader, - transport: transport, - method: method, - url: url, - protocols: protocols, - }, nil -} - -// Stream opens a protocol streamer to the server and streams until a client closes -// the connection or the server disconnects. -func (e *streamExecutor) Stream(options StreamOptions) error { - return e.StreamWithContext(context.Background(), options) -} - -// newConnectionAndStream creates a new SPDY connection and a stream protocol handler upon it. -func (e *streamExecutor) newConnectionAndStream(ctx context.Context, options StreamOptions) (httpstream.Connection, streamProtocolHandler, error) { - req, err := http.NewRequestWithContext(ctx, e.method, e.url.String(), nil) - if err != nil { - return nil, nil, fmt.Errorf("error creating request: %v", err) - } - - conn, protocol, err := spdy.Negotiate( - e.upgrader, - &http.Client{Transport: e.transport}, - req, - e.protocols..., - ) - if err != nil { - return nil, nil, err - } - - var streamer streamProtocolHandler - - switch protocol { - case remotecommand.StreamProtocolV4Name: - streamer = newStreamProtocolV4(options) - case remotecommand.StreamProtocolV3Name: - streamer = newStreamProtocolV3(options) - case remotecommand.StreamProtocolV2Name: - streamer = newStreamProtocolV2(options) - case "": - klog.V(4).Infof("The server did not negotiate a streaming protocol version. Falling back to %s", remotecommand.StreamProtocolV1Name) - fallthrough - case remotecommand.StreamProtocolV1Name: - streamer = newStreamProtocolV1(options) - } - - return conn, streamer, nil -} - -// StreamWithContext opens a protocol streamer to the server and streams until a client closes -// the connection or the server disconnects or the context is done. -func (e *streamExecutor) StreamWithContext(ctx context.Context, options StreamOptions) error { - conn, streamer, err := e.newConnectionAndStream(ctx, options) - if err != nil { - return err - } - defer conn.Close() - - panicChan := make(chan any, 1) - errorChan := make(chan error, 1) - go func() { - defer func() { - if p := recover(); p != nil { - panicChan <- p - } - }() - errorChan <- streamer.stream(conn) - }() - - select { - case p := <-panicChan: - panic(p) - case err := <-errorChan: - return err - case <-ctx.Done(): - return ctx.Err() - } -} diff --git a/vendor/k8s.io/client-go/tools/remotecommand/spdy.go b/vendor/k8s.io/client-go/tools/remotecommand/spdy.go new file mode 100644 index 0000000000..c2bfcf8a65 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/remotecommand/spdy.go @@ -0,0 +1,171 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +import ( + "context" + "fmt" + "net/http" + "net/url" + + "k8s.io/apimachinery/pkg/util/httpstream" + "k8s.io/apimachinery/pkg/util/remotecommand" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/transport/spdy" + "k8s.io/klog/v2" +) + +// spdyStreamExecutor handles transporting standard shell streams over an httpstream connection. +type spdyStreamExecutor struct { + upgrader spdy.Upgrader + transport http.RoundTripper + + method string + url *url.URL + protocols []string + rejectRedirects bool // if true, receiving redirect from upstream is an error +} + +// NewSPDYExecutor connects to the provided server and upgrades the connection to +// multiplexed bidirectional streams. +func NewSPDYExecutor(config *restclient.Config, method string, url *url.URL) (Executor, error) { + wrapper, upgradeRoundTripper, err := spdy.RoundTripperFor(config) + if err != nil { + return nil, err + } + return NewSPDYExecutorForTransports(wrapper, upgradeRoundTripper, method, url) +} + +// NewSPDYExecutorRejectRedirects returns an Executor that will upgrade the future +// connection to a SPDY bi-directional streaming connection when calling "Stream" (deprecated) +// or "StreamWithContext" (preferred). Additionally, if the upstream server returns a redirect +// during the attempted upgrade in these "Stream" calls, an error is returned. +func NewSPDYExecutorRejectRedirects(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL) (Executor, error) { + executor, err := NewSPDYExecutorForTransports(transport, upgrader, method, url) + if err != nil { + return nil, err + } + spdyExecutor := executor.(*spdyStreamExecutor) + spdyExecutor.rejectRedirects = true + return spdyExecutor, nil +} + +// NewSPDYExecutorForTransports connects to the provided server using the given transport, +// upgrades the response using the given upgrader to multiplexed bidirectional streams. +func NewSPDYExecutorForTransports(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL) (Executor, error) { + return NewSPDYExecutorForProtocols( + transport, upgrader, method, url, + remotecommand.StreamProtocolV5Name, + remotecommand.StreamProtocolV4Name, + remotecommand.StreamProtocolV3Name, + remotecommand.StreamProtocolV2Name, + remotecommand.StreamProtocolV1Name, + ) +} + +// NewSPDYExecutorForProtocols connects to the provided server and upgrades the connection to +// multiplexed bidirectional streams using only the provided protocols. Exposed for testing, most +// callers should use NewSPDYExecutor or NewSPDYExecutorForTransports. +func NewSPDYExecutorForProtocols(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL, protocols ...string) (Executor, error) { + return &spdyStreamExecutor{ + upgrader: upgrader, + transport: transport, + method: method, + url: url, + protocols: protocols, + }, nil +} + +// Stream opens a protocol streamer to the server and streams until a client closes +// the connection or the server disconnects. 
+func (e *spdyStreamExecutor) Stream(options StreamOptions) error { + return e.StreamWithContext(context.Background(), options) +} + +// newConnectionAndStream creates a new SPDY connection and a stream protocol handler upon it. +func (e *spdyStreamExecutor) newConnectionAndStream(ctx context.Context, options StreamOptions) (httpstream.Connection, streamProtocolHandler, error) { + req, err := http.NewRequestWithContext(ctx, e.method, e.url.String(), nil) + if err != nil { + return nil, nil, fmt.Errorf("error creating request: %v", err) + } + + client := http.Client{Transport: e.transport} + if e.rejectRedirects { + client.CheckRedirect = func(req *http.Request, via []*http.Request) error { + return fmt.Errorf("redirect not allowed") + } + } + conn, protocol, err := spdy.Negotiate( + e.upgrader, + &client, + req, + e.protocols..., + ) + if err != nil { + return nil, nil, err + } + + var streamer streamProtocolHandler + + switch protocol { + case remotecommand.StreamProtocolV5Name: + streamer = newStreamProtocolV5(options) + case remotecommand.StreamProtocolV4Name: + streamer = newStreamProtocolV4(options) + case remotecommand.StreamProtocolV3Name: + streamer = newStreamProtocolV3(options) + case remotecommand.StreamProtocolV2Name: + streamer = newStreamProtocolV2(options) + case "": + klog.V(4).Infof("The server did not negotiate a streaming protocol version. Falling back to %s", remotecommand.StreamProtocolV1Name) + fallthrough + case remotecommand.StreamProtocolV1Name: + streamer = newStreamProtocolV1(options) + } + + return conn, streamer, nil +} + +// StreamWithContext opens a protocol streamer to the server and streams until a client closes +// the connection or the server disconnects or the context is done. +func (e *spdyStreamExecutor) StreamWithContext(ctx context.Context, options StreamOptions) error { + conn, streamer, err := e.newConnectionAndStream(ctx, options) + if err != nil { + return err + } + defer conn.Close() + + panicChan := make(chan any, 1) + errorChan := make(chan error, 1) + go func() { + defer func() { + if p := recover(); p != nil { + panicChan <- p + } + }() + errorChan <- streamer.stream(conn) + }() + + select { + case p := <-panicChan: + panic(p) + case err := <-errorChan: + return err + case <-ctx.Done(): + return ctx.Err() + } +} diff --git a/vendor/k8s.io/client-go/tools/remotecommand/v5.go b/vendor/k8s.io/client-go/tools/remotecommand/v5.go new file mode 100644 index 0000000000..4da7bfb139 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/remotecommand/v5.go @@ -0,0 +1,35 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +// streamProtocolV5 add support for V5 of the remote command subprotocol. +// For the streamProtocolHandler, this version is the same as V4. 
+type streamProtocolV5 struct { + *streamProtocolV4 +} + +var _ streamProtocolHandler = &streamProtocolV5{} + +func newStreamProtocolV5(options StreamOptions) streamProtocolHandler { + return &streamProtocolV5{ + streamProtocolV4: newStreamProtocolV4(options).(*streamProtocolV4), + } +} + +func (p *streamProtocolV5) stream(conn streamCreator) error { + return p.streamProtocolV4.stream(conn) +} diff --git a/vendor/k8s.io/client-go/tools/remotecommand/websocket.go b/vendor/k8s.io/client-go/tools/remotecommand/websocket.go new file mode 100644 index 0000000000..a60986decc --- /dev/null +++ b/vendor/k8s.io/client-go/tools/remotecommand/websocket.go @@ -0,0 +1,502 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "net/http" + "sync" + "time" + + gwebsocket "github.com/gorilla/websocket" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/httpstream" + "k8s.io/apimachinery/pkg/util/remotecommand" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/transport/websocket" + "k8s.io/klog/v2" +) + +// writeDeadline defines the time that a write to the websocket connection +// must complete by, otherwise an i/o timeout occurs. The writeDeadline +// has nothing to do with a response from the other websocket connection +// endpoint; only that the message was successfully processed by the +// local websocket connection. The typical write deadline within the websocket +// library is one second. +const writeDeadline = 2 * time.Second + +var ( + _ Executor = &wsStreamExecutor{} + _ streamCreator = &wsStreamCreator{} + _ httpstream.Stream = &stream{} + + streamType2streamID = map[string]byte{ + v1.StreamTypeStdin: remotecommand.StreamStdIn, + v1.StreamTypeStdout: remotecommand.StreamStdOut, + v1.StreamTypeStderr: remotecommand.StreamStdErr, + v1.StreamTypeError: remotecommand.StreamErr, + v1.StreamTypeResize: remotecommand.StreamResize, + } +) + +const ( + // pingPeriod defines how often a heartbeat "ping" message is sent. + pingPeriod = 5 * time.Second + // pingReadDeadline defines the time waiting for a response heartbeat + // "pong" message before a timeout error occurs for websocket reading. + // This duration must always be greater than the "pingPeriod". By defining + // this deadline in terms of the ping period, we are essentially saying + // we can drop "X-1" (e.g. 3-1=2) pings before firing the timeout. + pingReadDeadline = (pingPeriod * 3) + (1 * time.Second) +) + +// wsStreamExecutor handles transporting standard shell streams over an httpstream connection. +type wsStreamExecutor struct { + transport http.RoundTripper + upgrader websocket.ConnectionHolder + method string + url string + // requested protocols in priority order (e.g. v5.channel.k8s.io before v4.channel.k8s.io). + protocols []string + // selected protocol from the handshake process; could be empty string if handshake fails. 
+ negotiated string + // period defines how often a "ping" heartbeat message is sent to the other endpoint. + heartbeatPeriod time.Duration + // deadline defines the amount of time before "pong" response must be received. + heartbeatDeadline time.Duration +} + +func NewWebSocketExecutor(config *restclient.Config, method, url string) (Executor, error) { + // Only supports V5 protocol for correct version skew functionality. + // Previous api servers will proxy upgrade requests to legacy websocket + // servers on container runtimes which support V1-V4. These legacy + // websocket servers will not handle the new CLOSE signal. + return NewWebSocketExecutorForProtocols(config, method, url, remotecommand.StreamProtocolV5Name) +} + +// NewWebSocketExecutorForProtocols allows to execute commands via a WebSocket connection. +func NewWebSocketExecutorForProtocols(config *restclient.Config, method, url string, protocols ...string) (Executor, error) { + transport, upgrader, err := websocket.RoundTripperFor(config) + if err != nil { + return nil, fmt.Errorf("error creating websocket transports: %v", err) + } + return &wsStreamExecutor{ + transport: transport, + upgrader: upgrader, + method: method, + url: url, + protocols: protocols, + heartbeatPeriod: pingPeriod, + heartbeatDeadline: pingReadDeadline, + }, nil +} + +// Deprecated: use StreamWithContext instead to avoid possible resource leaks. +// See https://github.com/kubernetes/kubernetes/pull/103177 for details. +func (e *wsStreamExecutor) Stream(options StreamOptions) error { + return e.StreamWithContext(context.Background(), options) +} + +// StreamWithContext upgrades an HTTPRequest to a WebSocket connection, and starts the various +// goroutines to implement the necessary streams over the connection. The "options" parameter +// defines which streams are requested. Returns an error if one occurred. This method is NOT +// safe to run concurrently with the same executor (because of the state stored in the upgrader). +func (e *wsStreamExecutor) StreamWithContext(ctx context.Context, options StreamOptions) error { + req, err := http.NewRequestWithContext(ctx, e.method, e.url, nil) + if err != nil { + return err + } + conn, err := websocket.Negotiate(e.transport, e.upgrader, req, e.protocols...) + if err != nil { + return err + } + if conn == nil { + panic(fmt.Errorf("websocket connection is nil")) + } + defer conn.Close() + e.negotiated = conn.Subprotocol() + klog.V(4).Infof("The subprotocol is %s", e.negotiated) + + var streamer streamProtocolHandler + switch e.negotiated { + case remotecommand.StreamProtocolV5Name: + streamer = newStreamProtocolV5(options) + case remotecommand.StreamProtocolV4Name: + streamer = newStreamProtocolV4(options) + case remotecommand.StreamProtocolV3Name: + streamer = newStreamProtocolV3(options) + case remotecommand.StreamProtocolV2Name: + streamer = newStreamProtocolV2(options) + case "": + klog.V(4).Infof("The server did not negotiate a streaming protocol version. 
Falling back to %s", remotecommand.StreamProtocolV1Name) + fallthrough + case remotecommand.StreamProtocolV1Name: + streamer = newStreamProtocolV1(options) + } + + panicChan := make(chan any, 1) + errorChan := make(chan error, 1) + go func() { + defer func() { + if p := recover(); p != nil { + panicChan <- p + } + }() + creator := newWSStreamCreator(conn) + go creator.readDemuxLoop( + e.upgrader.DataBufferSize(), + e.heartbeatPeriod, + e.heartbeatDeadline, + ) + errorChan <- streamer.stream(creator) + }() + + select { + case p := <-panicChan: + panic(p) + case err := <-errorChan: + return err + case <-ctx.Done(): + return ctx.Err() + } +} + +type wsStreamCreator struct { + conn *gwebsocket.Conn + // Protects writing to websocket connection; reading is lock-free + connWriteLock sync.Mutex + // map of stream id to stream; multiple streams read/write the connection + streams map[byte]*stream + streamsMu sync.Mutex +} + +func newWSStreamCreator(conn *gwebsocket.Conn) *wsStreamCreator { + return &wsStreamCreator{ + conn: conn, + streams: map[byte]*stream{}, + } +} + +func (c *wsStreamCreator) getStream(id byte) *stream { + c.streamsMu.Lock() + defer c.streamsMu.Unlock() + return c.streams[id] +} + +func (c *wsStreamCreator) setStream(id byte, s *stream) { + c.streamsMu.Lock() + defer c.streamsMu.Unlock() + c.streams[id] = s +} + +// CreateStream uses id from passed headers to create a stream over "c.conn" connection. +// Returns a Stream structure or nil and an error if one occurred. +func (c *wsStreamCreator) CreateStream(headers http.Header) (httpstream.Stream, error) { + streamType := headers.Get(v1.StreamType) + id, ok := streamType2streamID[streamType] + if !ok { + return nil, fmt.Errorf("unknown stream type: %s", streamType) + } + if s := c.getStream(id); s != nil { + return nil, fmt.Errorf("duplicate stream for type %s", streamType) + } + reader, writer := io.Pipe() + s := &stream{ + headers: headers, + readPipe: reader, + writePipe: writer, + conn: c.conn, + connWriteLock: &c.connWriteLock, + id: id, + } + c.setStream(id, s) + return s, nil +} + +// readDemuxLoop is the lock-free reading processor for this endpoint of the websocket +// connection. This loop reads the connection, and demultiplexes the data +// into one of the individual stream pipes (by checking the stream id). This +// loop can *not* be run concurrently, because there can only be one websocket +// connection reader at a time (a read mutex would provide no benefit). +func (c *wsStreamCreator) readDemuxLoop(bufferSize int, period time.Duration, deadline time.Duration) { + // Initialize and start the ping/pong heartbeat. + h := newHeartbeat(c.conn, period, deadline) + // Set initial timeout for websocket connection reading. + if err := c.conn.SetReadDeadline(time.Now().Add(deadline)); err != nil { + klog.Errorf("Websocket initial setting read deadline failed %v", err) + return + } + go h.start() + // Buffer size must correspond to the same size allocated + // for the read buffer during websocket client creation. A + // difference can cause incomplete connection reads. + readBuffer := make([]byte, bufferSize) + for { + // NextReader() only returns data messages (BinaryMessage or Text + // Message). Even though this call will never return control frames + // such as ping, pong, or close, this call is necessary for these + // message types to be processed. There can only be one reader + // at a time, so this reader loop must *not* be run concurrently; + // there is no lock for reading. 
Calling "NextReader()" before the + // current reader has been processed will close the current reader. + // If the heartbeat read deadline times out, this "NextReader()" will + // return an i/o error, and error handling will clean up. + messageType, r, err := c.conn.NextReader() + if err != nil { + websocketErr, ok := err.(*gwebsocket.CloseError) + if ok && websocketErr.Code == gwebsocket.CloseNormalClosure { + err = nil // readers will get io.EOF as it's a normal closure + } else { + err = fmt.Errorf("next reader: %w", err) + } + c.closeAllStreamReaders(err) + return + } + // All remote command protocols send/receive only binary data messages. + if messageType != gwebsocket.BinaryMessage { + c.closeAllStreamReaders(fmt.Errorf("unexpected message type: %d", messageType)) + return + } + // It's ok to read just a single byte because the underlying library wraps the actual + // connection with a buffered reader anyway. + _, err = io.ReadFull(r, readBuffer[:1]) + if err != nil { + c.closeAllStreamReaders(fmt.Errorf("read stream id: %w", err)) + return + } + streamID := readBuffer[0] + s := c.getStream(streamID) + if s == nil { + klog.Errorf("Unknown stream id %d, discarding message", streamID) + continue + } + for { + nr, errRead := r.Read(readBuffer) + if nr > 0 { + // Write the data to the stream's pipe. This can block. + _, errWrite := s.writePipe.Write(readBuffer[:nr]) + if errWrite != nil { + // Pipe must have been closed by the stream user. + // Nothing to do, discard the message. + break + } + } + if errRead != nil { + if errRead == io.EOF { + break + } + c.closeAllStreamReaders(fmt.Errorf("read message: %w", err)) + return + } + } + } +} + +// closeAllStreamReaders closes readers in all streams. +// This unblocks all stream.Read() calls. +func (c *wsStreamCreator) closeAllStreamReaders(err error) { + c.streamsMu.Lock() + defer c.streamsMu.Unlock() + for _, s := range c.streams { + // Closing writePipe unblocks all readPipe.Read() callers and prevents any future writes. + _ = s.writePipe.CloseWithError(err) + } +} + +type stream struct { + headers http.Header + readPipe *io.PipeReader + writePipe *io.PipeWriter + // conn is used for writing directly into the connection. + // Is nil after Close() / Reset() to prevent future writes. + conn *gwebsocket.Conn + // connWriteLock protects conn against concurrent write operations. There must be a single writer and a single reader only. + // The mutex is shared across all streams because the underlying connection is shared. + connWriteLock *sync.Mutex + id byte +} + +func (s *stream) Read(p []byte) (n int, err error) { + return s.readPipe.Read(p) +} + +// Write writes directly to the underlying WebSocket connection. +func (s *stream) Write(p []byte) (n int, err error) { + klog.V(4).Infof("Write() on stream %d", s.id) + defer klog.V(4).Infof("Write() done on stream %d", s.id) + s.connWriteLock.Lock() + defer s.connWriteLock.Unlock() + if s.conn == nil { + return 0, fmt.Errorf("write on closed stream %d", s.id) + } + err = s.conn.SetWriteDeadline(time.Now().Add(writeDeadline)) + if err != nil { + klog.V(7).Infof("Websocket setting write deadline failed %v", err) + return 0, err + } + // Message writer buffers the message data, so we don't need to do that ourselves. + // Just write id and the data as two separate writes to avoid allocating an intermediate buffer. 
+ w, err := s.conn.NextWriter(gwebsocket.BinaryMessage) + if err != nil { + return 0, err + } + defer func() { + if w != nil { + w.Close() + } + }() + _, err = w.Write([]byte{s.id}) + if err != nil { + return 0, err + } + n, err = w.Write(p) + if err != nil { + return n, err + } + err = w.Close() + w = nil + return n, err +} + +// Close half-closes the stream, indicating this side is finished with the stream. +func (s *stream) Close() error { + klog.V(4).Infof("Close() on stream %d", s.id) + defer klog.V(4).Infof("Close() done on stream %d", s.id) + s.connWriteLock.Lock() + defer s.connWriteLock.Unlock() + if s.conn == nil { + return fmt.Errorf("Close() on already closed stream %d", s.id) + } + // Communicate the CLOSE stream signal to the other websocket endpoint. + err := s.conn.WriteMessage(gwebsocket.BinaryMessage, []byte{remotecommand.StreamClose, s.id}) + s.conn = nil + return err +} + +func (s *stream) Reset() error { + klog.V(4).Infof("Reset() on stream %d", s.id) + defer klog.V(4).Infof("Reset() done on stream %d", s.id) + s.Close() + return s.writePipe.Close() +} + +func (s *stream) Headers() http.Header { + return s.headers +} + +func (s *stream) Identifier() uint32 { + return uint32(s.id) +} + +// heartbeat encasulates data necessary for the websocket ping/pong heartbeat. This +// heartbeat works by setting a read deadline on the websocket connection, then +// pushing this deadline into the future for every successful heartbeat. If the +// heartbeat "pong" fails to respond within the deadline, then the "NextReader()" call +// inside the "readDemuxLoop" will return an i/o error prompting a connection close +// and cleanup. +type heartbeat struct { + conn *gwebsocket.Conn + // period defines how often a "ping" heartbeat message is sent to the other endpoint + period time.Duration + // closing the "closer" channel will clean up the heartbeat timers + closer chan struct{} + // optional data to send with "ping" message + message []byte + // optionally received data message with "pong" message, same as sent with ping + pongMessage []byte +} + +// newHeartbeat creates heartbeat structure encapsulating fields necessary to +// run the websocket connection ping/pong mechanism and sets up handlers on +// the websocket connection. +func newHeartbeat(conn *gwebsocket.Conn, period time.Duration, deadline time.Duration) *heartbeat { + h := &heartbeat{ + conn: conn, + period: period, + closer: make(chan struct{}), + } + // Set up handler for receiving returned "pong" message from other endpoint + // by pushing the read deadline into the future. The "msg" received could + // be empty. + h.conn.SetPongHandler(func(msg string) error { + // Push the read deadline into the future. + klog.V(8).Infof("Pong message received (%s)--resetting read deadline", msg) + err := h.conn.SetReadDeadline(time.Now().Add(deadline)) + if err != nil { + klog.Errorf("Websocket setting read deadline failed %v", err) + return err + } + if len(msg) > 0 { + h.pongMessage = []byte(msg) + } + return nil + }) + // Set up handler to cleanup timers when this endpoint receives "Close" message. + closeHandler := h.conn.CloseHandler() + h.conn.SetCloseHandler(func(code int, text string) error { + close(h.closer) + return closeHandler(code, text) + }) + return h +} + +// setMessage is optional data sent with "ping" heartbeat. According to the websocket RFC +// this data sent with "ping" message should be returned in "pong" message. 
+func (h *heartbeat) setMessage(msg string) { + h.message = []byte(msg) +} + +// start the heartbeat by setting up necesssary handlers and looping by sending "ping" +// message every "period" until the "closer" channel is closed. +func (h *heartbeat) start() { + // Loop to continually send "ping" message through websocket connection every "period". + t := time.NewTicker(h.period) + defer t.Stop() + for { + select { + case <-h.closer: + klog.V(8).Infof("closed channel--returning") + return + case <-t.C: + // "WriteControl" does not need to be protected by a mutex. According to + // gorilla/websockets library docs: "The Close and WriteControl methods can + // be called concurrently with all other methods." + if err := h.conn.WriteControl(gwebsocket.PingMessage, h.message, time.Now().Add(writeDeadline)); err == nil { + klog.V(8).Infof("Websocket Ping succeeeded") + } else { + klog.Errorf("Websocket Ping failed: %v", err) + if errors.Is(err, gwebsocket.ErrCloseSent) { + // we continue because c.conn.CloseChan will manage closing the connection already + continue + } else if e, ok := err.(net.Error); ok && e.Timeout() { + // Continue, in case this is a transient failure. + // c.conn.CloseChan above will tell us when the connection is + // actually closed. + // If Temporary function hadn't been deprecated, we would have used it. + // But most of temporary errors are timeout errors anyway. + continue + } + return + } + } + } +} diff --git a/vendor/k8s.io/client-go/transport/spdy/spdy.go b/vendor/k8s.io/client-go/transport/spdy/spdy.go index f50b68e5ff..9fddc6c5f2 100644 --- a/vendor/k8s.io/client-go/transport/spdy/spdy.go +++ b/vendor/k8s.io/client-go/transport/spdy/spdy.go @@ -43,11 +43,15 @@ func RoundTripperFor(config *restclient.Config) (http.RoundTripper, Upgrader, er if config.Proxy != nil { proxy = config.Proxy } - upgradeRoundTripper := spdy.NewRoundTripperWithConfig(spdy.RoundTripperConfig{ - TLS: tlsConfig, - Proxier: proxy, - PingPeriod: time.Second * 5, + upgradeRoundTripper, err := spdy.NewRoundTripperWithConfig(spdy.RoundTripperConfig{ + TLS: tlsConfig, + Proxier: proxy, + PingPeriod: time.Second * 5, + UpgradeTransport: nil, }) + if err != nil { + return nil, nil, err + } wrapper, err := restclient.HTTPWrappersForConfig(config, upgradeRoundTripper) if err != nil { return nil, nil, err diff --git a/vendor/k8s.io/client-go/transport/transport.go b/vendor/k8s.io/client-go/transport/transport.go index 78060719a9..4770331a0e 100644 --- a/vendor/k8s.io/client-go/transport/transport.go +++ b/vendor/k8s.io/client-go/transport/transport.go @@ -96,6 +96,32 @@ func TLSConfigFor(c *Config) (*tls.Config, error) { } if c.HasCA() { + /* + kubernetes mutual (2-way) x509 between client and apiserver: + + 1. apiserver sending its apiserver certificate along with its publickey to client + >2. client verifies the apiserver certificate sent against its cluster certificate authority data + 3. client sending its client certificate along with its public key to the apiserver + 4. 
apiserver verifies the client certificate sent against its cluster certificate authority data + + description: + here, with this block, + cluster certificate authority data gets loaded into TLS before the handshake process + for client to later during the handshake verify the apiserver certificate + + normal args related to this stage: + --certificate-authority='': + Path to a cert file for the certificate authority + + (retrievable from "kubectl options" command) + (suggested by @deads2k) + + see also: + - for the step 1, see: staging/src/k8s.io/apiserver/pkg/server/options/serving.go + - for the step 3, see: a few lines below in this file + - for the step 4, see: staging/src/k8s.io/apiserver/pkg/authentication/request/x509/x509.go + */ + rootCAs, err := rootCertPool(c.TLS.CAData) if err != nil { return nil, fmt.Errorf("unable to load root certificates: %w", err) @@ -121,6 +147,35 @@ func TLSConfigFor(c *Config) (*tls.Config, error) { } if c.HasCertAuth() || c.HasCertCallback() { + + /* + kubernetes mutual (2-way) x509 between client and apiserver: + + 1. apiserver sending its apiserver certificate along with its publickey to client + 2. client verifies the apiserver certificate sent against its cluster certificate authority data + >3. client sending its client certificate along with its public key to the apiserver + 4. apiserver verifies the client certificate sent against its cluster certificate authority data + + description: + here, with this callback function, + client certificate and pub key get loaded into TLS during the handshake process + for apiserver to later in the step 4 verify the client certificate + + normal args related to this stage: + --client-certificate='': + Path to a client certificate file for TLS + --client-key='': + Path to a client key file for TLS + + (retrievable from "kubectl options" command) + (suggested by @deads2k) + + see also: + - for the step 1, see: staging/src/k8s.io/apiserver/pkg/server/options/serving.go + - for the step 2, see: a few lines above in this file + - for the step 4, see: staging/src/k8s.io/apiserver/pkg/authentication/request/x509/x509.go + */ + tlsConfig.GetClientCertificate = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) { // Note: static key/cert data always take precedence over cert // callback. diff --git a/vendor/k8s.io/client-go/transport/websocket/roundtripper.go b/vendor/k8s.io/client-go/transport/websocket/roundtripper.go new file mode 100644 index 0000000000..010f916bc7 --- /dev/null +++ b/vendor/k8s.io/client-go/transport/websocket/roundtripper.go @@ -0,0 +1,163 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package websocket + +import ( + "crypto/tls" + "fmt" + "net/http" + "net/url" + + gwebsocket "github.com/gorilla/websocket" + + "k8s.io/apimachinery/pkg/util/httpstream" + utilnet "k8s.io/apimachinery/pkg/util/net" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/transport" +) + +var ( + _ utilnet.TLSClientConfigHolder = &RoundTripper{} + _ http.RoundTripper = &RoundTripper{} +) + +// ConnectionHolder defines functions for structure providing +// access to the websocket connection. +type ConnectionHolder interface { + DataBufferSize() int + Connection() *gwebsocket.Conn +} + +// RoundTripper knows how to establish a connection to a remote WebSocket endpoint and make it available for use. +// RoundTripper must not be reused. +type RoundTripper struct { + // TLSConfig holds the TLS configuration settings to use when connecting + // to the remote server. + TLSConfig *tls.Config + + // Proxier specifies a function to return a proxy for a given + // Request. If the function returns a non-nil error, the + // request is aborted with the provided error. + // If Proxy is nil or returns a nil *URL, no proxy is used. + Proxier func(req *http.Request) (*url.URL, error) + + // Conn holds the WebSocket connection after a round trip. + Conn *gwebsocket.Conn +} + +// Connection returns the stored websocket connection. +func (rt *RoundTripper) Connection() *gwebsocket.Conn { + return rt.Conn +} + +// DataBufferSize returns the size of buffers for the +// websocket connection. +func (rt *RoundTripper) DataBufferSize() int { + return 32 * 1024 +} + +// TLSClientConfig implements pkg/util/net.TLSClientConfigHolder. +func (rt *RoundTripper) TLSClientConfig() *tls.Config { + return rt.TLSConfig +} + +// RoundTrip connects to the remote websocket using the headers in the request and the TLS +// configuration from the config +func (rt *RoundTripper) RoundTrip(request *http.Request) (retResp *http.Response, retErr error) { + defer func() { + if request.Body != nil { + err := request.Body.Close() + if retErr == nil { + retErr = err + } + } + }() + + // set the protocol version directly on the dialer from the header + protocolVersions := request.Header[httpstream.HeaderProtocolVersion] + delete(request.Header, httpstream.HeaderProtocolVersion) + + dialer := gwebsocket.Dialer{ + Proxy: rt.Proxier, + TLSClientConfig: rt.TLSConfig, + Subprotocols: protocolVersions, + ReadBufferSize: rt.DataBufferSize() + 1024, // add space for the protocol byte indicating which channel the data is for + WriteBufferSize: rt.DataBufferSize() + 1024, // add space for the protocol byte indicating which channel the data is for + } + switch request.URL.Scheme { + case "https": + request.URL.Scheme = "wss" + case "http": + request.URL.Scheme = "ws" + default: + return nil, fmt.Errorf("unknown url scheme: %s", request.URL.Scheme) + } + wsConn, resp, err := dialer.DialContext(request.Context(), request.URL.String(), request.Header) + if err != nil { + return nil, &httpstream.UpgradeFailureError{Cause: err} + } + + rt.Conn = wsConn + + return resp, nil +} + +// RoundTripperFor transforms the passed rest config into a wrapped roundtripper, as well +// as a pointer to the websocket RoundTripper. The websocket RoundTripper contains the +// websocket connection after RoundTrip() on the wrapper. Returns an error if there is +// a problem creating the round trippers. 
+func RoundTripperFor(config *restclient.Config) (http.RoundTripper, ConnectionHolder, error) { + transportCfg, err := config.TransportConfig() + if err != nil { + return nil, nil, err + } + tlsConfig, err := transport.TLSConfigFor(transportCfg) + if err != nil { + return nil, nil, err + } + proxy := config.Proxy + if proxy == nil { + proxy = utilnet.NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment) + } + + upgradeRoundTripper := &RoundTripper{ + TLSConfig: tlsConfig, + Proxier: proxy, + } + wrapper, err := transport.HTTPWrappersForConfig(transportCfg, upgradeRoundTripper) + if err != nil { + return nil, nil, err + } + return wrapper, upgradeRoundTripper, nil +} + +// Negotiate opens a connection to a remote server and attempts to negotiate +// a WebSocket connection. Upon success, it returns the negotiated connection. +// The round tripper rt must use the WebSocket round tripper wsRt - see RoundTripperFor. +func Negotiate(rt http.RoundTripper, connectionInfo ConnectionHolder, req *http.Request, protocols ...string) (*gwebsocket.Conn, error) { + req.Header[httpstream.HeaderProtocolVersion] = protocols + resp, err := rt.RoundTrip(req) + if err != nil { + return nil, err + } + err = resp.Body.Close() + if err != nil { + connectionInfo.Connection().Close() + return nil, fmt.Errorf("error closing response body: %v", err) + } + return connectionInfo.Connection(), nil +} diff --git a/vendor/k8s.io/client-go/util/jsonpath/jsonpath.go b/vendor/k8s.io/client-go/util/jsonpath/jsonpath.go index 49ecd1465a..86a3d6dde9 100644 --- a/vendor/k8s.io/client-go/util/jsonpath/jsonpath.go +++ b/vendor/k8s.io/client-go/util/jsonpath/jsonpath.go @@ -573,6 +573,9 @@ func (j *JSONPath) evalToText(v reflect.Value) ([]byte, error) { if !ok { return nil, fmt.Errorf("can't print type %s", v.Type()) } + if iface == nil { + return []byte("null"), nil + } var buffer bytes.Buffer fmt.Fprint(&buffer, iface) return buffer.Bytes(), nil diff --git a/vendor/k8s.io/client-go/util/workqueue/queue.go b/vendor/k8s.io/client-go/util/workqueue/queue.go index 380c064552..a363d1afb4 100644 --- a/vendor/k8s.io/client-go/util/workqueue/queue.go +++ b/vendor/k8s.io/client-go/util/workqueue/queue.go @@ -238,8 +238,12 @@ func (q *Type) Done(item interface{}) { // ShutDown will cause q to ignore all new items added to it and // immediately instruct the worker goroutines to exit. func (q *Type) ShutDown() { - q.setDrain(false) - q.shutdown() + q.cond.L.Lock() + defer q.cond.L.Unlock() + + q.drain = false + q.shuttingDown = true + q.cond.Broadcast() } // ShutDownWithDrain will cause q to ignore all new items added to it. As soon @@ -252,53 +256,16 @@ func (q *Type) ShutDown() { // ShutDownWithDrain, as to force the queue shut down to terminate immediately // without waiting for the drainage. func (q *Type) ShutDownWithDrain() { - q.setDrain(true) - q.shutdown() - for q.isProcessing() && q.shouldDrain() { - q.waitForProcessing() - } -} - -// isProcessing indicates if there are still items on the work queue being -// processed. It's used to drain the work queue on an eventual shutdown. -func (q *Type) isProcessing() bool { - q.cond.L.Lock() - defer q.cond.L.Unlock() - return q.processing.len() != 0 -} - -// waitForProcessing waits for the worker goroutines to finish processing items -// and call Done on them. 
-func (q *Type) waitForProcessing() { - q.cond.L.Lock() - defer q.cond.L.Unlock() - // Ensure that we do not wait on a queue which is already empty, as that - // could result in waiting for Done to be called on items in an empty queue - // which has already been shut down, which will result in waiting - // indefinitely. - if q.processing.len() == 0 { - return - } - q.cond.Wait() -} - -func (q *Type) setDrain(shouldDrain bool) { - q.cond.L.Lock() - defer q.cond.L.Unlock() - q.drain = shouldDrain -} - -func (q *Type) shouldDrain() bool { q.cond.L.Lock() defer q.cond.L.Unlock() - return q.drain -} -func (q *Type) shutdown() { - q.cond.L.Lock() - defer q.cond.L.Unlock() + q.drain = true q.shuttingDown = true q.cond.Broadcast() + + for q.processing.len() != 0 && q.drain { + q.cond.Wait() + } } func (q *Type) ShuttingDown() bool { diff --git a/vendor/k8s.io/klog/v2/.golangci.yaml b/vendor/k8s.io/klog/v2/.golangci.yaml new file mode 100644 index 0000000000..0d77d65f06 --- /dev/null +++ b/vendor/k8s.io/klog/v2/.golangci.yaml @@ -0,0 +1,6 @@ +linters: + disable-all: true + enable: # sorted alphabetical + - gofmt + - misspell + - revive diff --git a/vendor/k8s.io/klog/v2/internal/buffer/buffer.go b/vendor/k8s.io/klog/v2/internal/buffer/buffer.go index f325ded5e9..46de00fb06 100644 --- a/vendor/k8s.io/klog/v2/internal/buffer/buffer.go +++ b/vendor/k8s.io/klog/v2/internal/buffer/buffer.go @@ -30,14 +30,16 @@ import ( var ( // Pid is inserted into log headers. Can be overridden for tests. Pid = os.Getpid() + + // Time, if set, will be used instead of the actual current time. + Time *time.Time ) // Buffer holds a single byte.Buffer for reuse. The zero value is ready for // use. It also provides some helper methods for output formatting. type Buffer struct { bytes.Buffer - Tmp [64]byte // temporary byte array for creating headers. - next *Buffer + Tmp [64]byte // temporary byte array for creating headers. } var buffers = sync.Pool{ @@ -122,6 +124,9 @@ func (buf *Buffer) FormatHeader(s severity.Severity, file string, line int, now // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. // It's worth about 3X. Fprintf is hard. + if Time != nil { + now = *Time + } _, month, day := now.Date() hour, minute, second := now.Clock() // Lmmdd hh:mm:ss.uuuuuu threadid file:line] @@ -157,6 +162,9 @@ func (buf *Buffer) SprintHeader(s severity.Severity, now time.Time) string { // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. // It's worth about 3X. Fprintf is hard. + if Time != nil { + now = *Time + } _, month, day := now.Date() hour, minute, second := now.Clock() // Lmmdd hh:mm:ss.uuuuuu threadid file:line] diff --git a/vendor/k8s.io/klog/v2/internal/clock/clock.go b/vendor/k8s.io/klog/v2/internal/clock/clock.go index b8b6af5c81..cc11bb4802 100644 --- a/vendor/k8s.io/klog/v2/internal/clock/clock.go +++ b/vendor/k8s.io/klog/v2/internal/clock/clock.go @@ -39,16 +39,6 @@ type Clock interface { // Sleep sleeps for the provided duration d. // Consider making the sleep interruptible by using 'select' on a context channel and a timer channel. Sleep(d time.Duration) - // Tick returns the channel of a new Ticker. - // This method does not allow to free/GC the backing ticker. Use - // NewTicker from WithTicker instead. - Tick(d time.Duration) <-chan time.Time -} - -// WithTicker allows for injecting fake or real clocks into code that -// needs to do arbitrary things based on time. -type WithTicker interface { - Clock // NewTicker returns a new Ticker. 
NewTicker(time.Duration) Ticker } @@ -66,7 +56,7 @@ type WithDelayedExecution interface { // WithTickerAndDelayedExecution allows for injecting fake or real clocks // into code that needs Ticker and AfterFunc functionality type WithTickerAndDelayedExecution interface { - WithTicker + Clock // AfterFunc executes f in its own goroutine after waiting // for d duration and returns a Timer whose channel can be // closed by calling Stop() on the Timer. @@ -79,7 +69,7 @@ type Ticker interface { Stop() } -var _ = WithTicker(RealClock{}) +var _ Clock = RealClock{} // RealClock really calls time.Now() type RealClock struct{} @@ -115,13 +105,6 @@ func (RealClock) AfterFunc(d time.Duration, f func()) Timer { } } -// Tick is the same as time.Tick(d) -// This method does not allow to free/GC the backing ticker. Use -// NewTicker instead. -func (RealClock) Tick(d time.Duration) <-chan time.Time { - return time.Tick(d) -} - // NewTicker returns a new Ticker. func (RealClock) NewTicker(d time.Duration) Ticker { return &realTicker{ diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go index bcdf5f8ee1..d1a4751c94 100644 --- a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go +++ b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go @@ -172,73 +172,6 @@ func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { Formatter{}.KVListFormat(b, keysAndValues...) } -// KVFormat serializes one key/value pair into the provided buffer. -// A space gets inserted before the pair. -func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) { - b.WriteByte(' ') - // Keys are assumed to be well-formed according to - // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments - // for the sake of performance. Keys with spaces, - // special characters, etc. will break parsing. - if sK, ok := k.(string); ok { - // Avoid one allocation when the key is a string, which - // normally it should be. - b.WriteString(sK) - } else { - b.WriteString(fmt.Sprintf("%s", k)) - } - - // The type checks are sorted so that more frequently used ones - // come first because that is then faster in the common - // cases. In Kubernetes, ObjectRef (a Stringer) is more common - // than plain strings - // (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235). - switch v := v.(type) { - case textWriter: - writeTextWriterValue(b, v) - case fmt.Stringer: - writeStringValue(b, StringerToString(v)) - case string: - writeStringValue(b, v) - case error: - writeStringValue(b, ErrorToString(v)) - case logr.Marshaler: - value := MarshalerToValue(v) - // A marshaler that returns a string is useful for - // delayed formatting of complex values. We treat this - // case like a normal string. This is useful for - // multi-line support. - // - // We could do this by recursively formatting a value, - // but that comes with the risk of infinite recursion - // if a marshaler returns itself. Instead we call it - // only once and rely on it returning the intended - // value directly. - switch value := value.(type) { - case string: - writeStringValue(b, value) - default: - f.formatAny(b, value) - } - case []byte: - // In https://github.com/kubernetes/klog/pull/237 it was decided - // to format byte slices with "%+q". 
The advantages of that are: - // - readable output if the bytes happen to be printable - // - non-printable bytes get represented as unicode escape - // sequences (\uxxxx) - // - // The downsides are that we cannot use the faster - // strconv.Quote here and that multi-line output is not - // supported. If developers know that a byte array is - // printable and they want multi-line output, they can - // convert the value to string before logging it. - b.WriteByte('=') - b.WriteString(fmt.Sprintf("%+q", v)) - default: - f.formatAny(b, v) - } -} - func KVFormat(b *bytes.Buffer, k, v interface{}) { Formatter{}.KVFormat(b, k, v) } @@ -251,6 +184,10 @@ func (f Formatter) formatAny(b *bytes.Buffer, v interface{}) { b.WriteString(f.AnyToStringHook(v)) return } + formatAsJSON(b, v) +} + +func formatAsJSON(b *bytes.Buffer, v interface{}) { encoder := json.NewEncoder(b) l := b.Len() if err := encoder.Encode(v); err != nil { diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_no_slog.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_no_slog.go new file mode 100644 index 0000000000..d9c7d15467 --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_no_slog.go @@ -0,0 +1,97 @@ +//go:build !go1.21 +// +build !go1.21 + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serialize + +import ( + "bytes" + "fmt" + + "github.com/go-logr/logr" +) + +// KVFormat serializes one key/value pair into the provided buffer. +// A space gets inserted before the pair. +func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) { + // This is the version without slog support. Must be kept in sync with + // the version in keyvalues_slog.go. + + b.WriteByte(' ') + // Keys are assumed to be well-formed according to + // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments + // for the sake of performance. Keys with spaces, + // special characters, etc. will break parsing. + if sK, ok := k.(string); ok { + // Avoid one allocation when the key is a string, which + // normally it should be. + b.WriteString(sK) + } else { + b.WriteString(fmt.Sprintf("%s", k)) + } + + // The type checks are sorted so that more frequently used ones + // come first because that is then faster in the common + // cases. In Kubernetes, ObjectRef (a Stringer) is more common + // than plain strings + // (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235). + switch v := v.(type) { + case textWriter: + writeTextWriterValue(b, v) + case fmt.Stringer: + writeStringValue(b, StringerToString(v)) + case string: + writeStringValue(b, v) + case error: + writeStringValue(b, ErrorToString(v)) + case logr.Marshaler: + value := MarshalerToValue(v) + // A marshaler that returns a string is useful for + // delayed formatting of complex values. We treat this + // case like a normal string. This is useful for + // multi-line support. 
+ // + // We could do this by recursively formatting a value, + // but that comes with the risk of infinite recursion + // if a marshaler returns itself. Instead we call it + // only once and rely on it returning the intended + // value directly. + switch value := value.(type) { + case string: + writeStringValue(b, value) + default: + f.formatAny(b, value) + } + case []byte: + // In https://github.com/kubernetes/klog/pull/237 it was decided + // to format byte slices with "%+q". The advantages of that are: + // - readable output if the bytes happen to be printable + // - non-printable bytes get represented as unicode escape + // sequences (\uxxxx) + // + // The downsides are that we cannot use the faster + // strconv.Quote here and that multi-line output is not + // supported. If developers know that a byte array is + // printable and they want multi-line output, they can + // convert the value to string before logging it. + b.WriteByte('=') + b.WriteString(fmt.Sprintf("%+q", v)) + default: + f.formatAny(b, v) + } +} diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_slog.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_slog.go new file mode 100644 index 0000000000..89acf97723 --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_slog.go @@ -0,0 +1,155 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serialize + +import ( + "bytes" + "fmt" + "log/slog" + "strconv" + + "github.com/go-logr/logr" +) + +// KVFormat serializes one key/value pair into the provided buffer. +// A space gets inserted before the pair. +func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) { + // This is the version without slog support. Must be kept in sync with + // the version in keyvalues_slog.go. + + b.WriteByte(' ') + // Keys are assumed to be well-formed according to + // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments + // for the sake of performance. Keys with spaces, + // special characters, etc. will break parsing. + if sK, ok := k.(string); ok { + // Avoid one allocation when the key is a string, which + // normally it should be. + b.WriteString(sK) + } else { + b.WriteString(fmt.Sprintf("%s", k)) + } + + // The type checks are sorted so that more frequently used ones + // come first because that is then faster in the common + // cases. In Kubernetes, ObjectRef (a Stringer) is more common + // than plain strings + // (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235). + // + // slog.LogValuer does not need to be handled here because the handler will + // already have resolved such special values to the final value for logging. + switch v := v.(type) { + case textWriter: + writeTextWriterValue(b, v) + case slog.Value: + // This must come before fmt.Stringer because slog.Value implements + // fmt.Stringer, but does not produce the output that we want. 
+ b.WriteByte('=') + generateJSON(b, v) + case fmt.Stringer: + writeStringValue(b, StringerToString(v)) + case string: + writeStringValue(b, v) + case error: + writeStringValue(b, ErrorToString(v)) + case logr.Marshaler: + value := MarshalerToValue(v) + // A marshaler that returns a string is useful for + // delayed formatting of complex values. We treat this + // case like a normal string. This is useful for + // multi-line support. + // + // We could do this by recursively formatting a value, + // but that comes with the risk of infinite recursion + // if a marshaler returns itself. Instead we call it + // only once and rely on it returning the intended + // value directly. + switch value := value.(type) { + case string: + writeStringValue(b, value) + default: + f.formatAny(b, value) + } + case slog.LogValuer: + value := slog.AnyValue(v).Resolve() + if value.Kind() == slog.KindString { + writeStringValue(b, value.String()) + } else { + b.WriteByte('=') + generateJSON(b, value) + } + case []byte: + // In https://github.com/kubernetes/klog/pull/237 it was decided + // to format byte slices with "%+q". The advantages of that are: + // - readable output if the bytes happen to be printable + // - non-printable bytes get represented as unicode escape + // sequences (\uxxxx) + // + // The downsides are that we cannot use the faster + // strconv.Quote here and that multi-line output is not + // supported. If developers know that a byte array is + // printable and they want multi-line output, they can + // convert the value to string before logging it. + b.WriteByte('=') + b.WriteString(fmt.Sprintf("%+q", v)) + default: + f.formatAny(b, v) + } +} + +// generateJSON has the same preference for plain strings as KVFormat. +// In contrast to KVFormat it always produces valid JSON with no line breaks. +func generateJSON(b *bytes.Buffer, v interface{}) { + switch v := v.(type) { + case slog.Value: + switch v.Kind() { + case slog.KindGroup: + // Format as a JSON group. We must not involve f.AnyToStringHook (if there is any), + // because there is no guarantee that it produces valid JSON. + b.WriteByte('{') + for i, attr := range v.Group() { + if i > 0 { + b.WriteByte(',') + } + b.WriteString(strconv.Quote(attr.Key)) + b.WriteByte(':') + generateJSON(b, attr.Value) + } + b.WriteByte('}') + case slog.KindLogValuer: + generateJSON(b, v.Resolve()) + default: + // Peel off the slog.Value wrapper and format the actual value. + generateJSON(b, v.Any()) + } + case fmt.Stringer: + b.WriteString(strconv.Quote(StringerToString(v))) + case logr.Marshaler: + generateJSON(b, MarshalerToValue(v)) + case slog.LogValuer: + generateJSON(b, slog.AnyValue(v).Resolve().Any()) + case string: + b.WriteString(strconv.Quote(v)) + case error: + b.WriteString(strconv.Quote(v.Error())) + default: + formatAsJSON(b, v) + } +} diff --git a/vendor/k8s.io/klog/v2/internal/sloghandler/sloghandler_slog.go b/vendor/k8s.io/klog/v2/internal/sloghandler/sloghandler_slog.go new file mode 100644 index 0000000000..21f1697d09 --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/sloghandler/sloghandler_slog.go @@ -0,0 +1,96 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sloghandler + +import ( + "context" + "log/slog" + "runtime" + "strings" + "time" + + "k8s.io/klog/v2/internal/severity" +) + +func Handle(_ context.Context, record slog.Record, groups string, printWithInfos func(file string, line int, now time.Time, err error, s severity.Severity, msg string, kvList []interface{})) error { + now := record.Time + if now.IsZero() { + // This format doesn't support printing entries without a time. + now = time.Now() + } + + // slog has numeric severity levels, with 0 as default "info", negative for debugging, and + // positive with some pre-defined levels for more important. Those ranges get mapped to + // the corresponding klog levels where possible, with "info" the default that is used + // also for negative debug levels. + level := record.Level + s := severity.InfoLog + switch { + case level >= slog.LevelError: + s = severity.ErrorLog + case level >= slog.LevelWarn: + s = severity.WarningLog + } + + var file string + var line int + if record.PC != 0 { + // Same as https://cs.opensource.google/go/x/exp/+/642cacee:slog/record.go;drc=642cacee5cc05231f45555a333d07f1005ffc287;l=70 + fs := runtime.CallersFrames([]uintptr{record.PC}) + f, _ := fs.Next() + if f.File != "" { + file = f.File + if slash := strings.LastIndex(file, "/"); slash >= 0 { + file = file[slash+1:] + } + line = f.Line + } + } else { + file = "???" + line = 1 + } + + kvList := make([]interface{}, 0, 2*record.NumAttrs()) + record.Attrs(func(attr slog.Attr) bool { + kvList = appendAttr(groups, kvList, attr) + return true + }) + + printWithInfos(file, line, now, nil, s, record.Message, kvList) + return nil +} + +func Attrs2KVList(groups string, attrs []slog.Attr) []interface{} { + kvList := make([]interface{}, 0, 2*len(attrs)) + for _, attr := range attrs { + kvList = appendAttr(groups, kvList, attr) + } + return kvList +} + +func appendAttr(groups string, kvList []interface{}, attr slog.Attr) []interface{} { + var key string + if groups != "" { + key = groups + "." + attr.Key + } else { + key = attr.Key + } + return append(kvList, key, attr.Value) +} diff --git a/vendor/k8s.io/klog/v2/k8s_references_slog.go b/vendor/k8s.io/klog/v2/k8s_references_slog.go new file mode 100644 index 0000000000..5522c84c77 --- /dev/null +++ b/vendor/k8s.io/klog/v2/k8s_references_slog.go @@ -0,0 +1,39 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package klog + +import ( + "log/slog" +) + +func (ref ObjectRef) LogValue() slog.Value { + if ref.Namespace != "" { + return slog.GroupValue(slog.String("name", ref.Name), slog.String("namespace", ref.Namespace)) + } + return slog.GroupValue(slog.String("name", ref.Name)) +} + +var _ slog.LogValuer = ObjectRef{} + +func (ks kobjSlice) LogValue() slog.Value { + return slog.AnyValue(ks.MarshalLog()) +} + +var _ slog.LogValuer = kobjSlice{} diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go index 152f8a6bd6..72502db3ae 100644 --- a/vendor/k8s.io/klog/v2/klog.go +++ b/vendor/k8s.io/klog/v2/klog.go @@ -415,7 +415,7 @@ func init() { logging.stderrThreshold = severityValue{ Severity: severity.ErrorLog, // Default stderrThreshold is ERROR. } - commandLine.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=false)") + commandLine.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=true)") commandLine.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") commandLine.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") @@ -518,9 +518,7 @@ type settings struct { func (s settings) deepCopy() settings { // vmodule is a slice and would be shared, so we have copy it. filter := make([]modulePat, len(s.vmodule.filter)) - for i := range s.vmodule.filter { - filter[i] = s.vmodule.filter[i] - } + copy(filter, s.vmodule.filter) s.vmodule.filter = filter if s.logger != nil { @@ -657,16 +655,15 @@ func (l *loggingT) header(s severity.Severity, depth int) (*buffer.Buffer, strin } } } - return l.formatHeader(s, file, line), file, line + return l.formatHeader(s, file, line, timeNow()), file, line } // formatHeader formats a log header using the provided file name and line number. -func (l *loggingT) formatHeader(s severity.Severity, file string, line int) *buffer.Buffer { +func (l *loggingT) formatHeader(s severity.Severity, file string, line int, now time.Time) *buffer.Buffer { buf := buffer.GetBuffer() if l.skipHeaders { return buf } - now := timeNow() buf.FormatHeader(s, file, line, now) return buf } @@ -676,6 +673,10 @@ func (l *loggingT) println(s severity.Severity, logger *logWriter, filter LogFil } func (l *loggingT) printlnDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, args ...interface{}) { + if false { + _ = fmt.Sprintln(args...) // cause vet to treat this function like fmt.Println + } + buf, file, line := l.header(s, depth) // If a logger is set and doesn't support writing a formatted buffer, // we clear the generated header as we rely on the backing @@ -696,7 +697,15 @@ func (l *loggingT) print(s severity.Severity, logger *logWriter, filter LogFilte } func (l *loggingT) printDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, args ...interface{}) { + if false { + _ = fmt.Sprint(args...) // // cause vet to treat this function like fmt.Print + } + buf, file, line := l.header(s, depth) + l.printWithInfos(buf, file, line, s, logger, filter, depth+1, args...) 
+} + +func (l *loggingT) printWithInfos(buf *buffer.Buffer, file string, line int, s severity.Severity, logger *logWriter, filter LogFilter, depth int, args ...interface{}) { // If a logger is set and doesn't support writing a formatted buffer, // we clear the generated header as we rely on the backing // logger implementation to print headers. @@ -719,6 +728,10 @@ func (l *loggingT) printf(s severity.Severity, logger *logWriter, filter LogFilt } func (l *loggingT) printfDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, format string, args ...interface{}) { + if false { + _ = fmt.Sprintf(format, args...) // cause vet to treat this function like fmt.Printf + } + buf, file, line := l.header(s, depth) // If a logger is set and doesn't support writing a formatted buffer, // we clear the generated header as we rely on the backing @@ -741,7 +754,7 @@ func (l *loggingT) printfDepth(s severity.Severity, logger *logWriter, filter Lo // alsoLogToStderr is true, the log message always appears on standard error; it // will also appear in the log file unless --logtostderr is set. func (l *loggingT) printWithFileLine(s severity.Severity, logger *logWriter, filter LogFilter, file string, line int, alsoToStderr bool, args ...interface{}) { - buf := l.formatHeader(s, file, line) + buf := l.formatHeader(s, file, line, timeNow()) // If a logger is set and doesn't support writing a formatted buffer, // we clear the generated header as we rely on the backing // logger implementation to print headers. @@ -759,7 +772,7 @@ func (l *loggingT) printWithFileLine(s severity.Severity, logger *logWriter, fil l.output(s, logger, buf, 2 /* depth */, file, line, alsoToStderr) } -// if loggr is specified, will call loggr.Error, otherwise output with logging module. +// if logger is specified, will call logger.Error, otherwise output with logging module. func (l *loggingT) errorS(err error, logger *logWriter, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { if filter != nil { msg, keysAndValues = filter.FilterS(msg, keysAndValues) @@ -771,7 +784,7 @@ func (l *loggingT) errorS(err error, logger *logWriter, filter LogFilter, depth l.printS(err, severity.ErrorLog, depth+1, msg, keysAndValues...) } -// if loggr is specified, will call loggr.Info, otherwise output with logging module. +// if logger is specified, will call logger.Info, otherwise output with logging module. func (l *loggingT) infoS(logger *logWriter, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { if filter != nil { msg, keysAndValues = filter.FilterS(msg, keysAndValues) @@ -783,7 +796,7 @@ func (l *loggingT) infoS(logger *logWriter, filter LogFilter, depth int, msg str l.printS(nil, severity.InfoLog, depth+1, msg, keysAndValues...) } -// printS is called from infoS and errorS if loggr is not specified. +// printS is called from infoS and errorS if logger is not specified. // set log severity by s func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string, keysAndValues ...interface{}) { // Only create a new buffer if we don't have one cached. @@ -796,7 +809,7 @@ func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string, serialize.KVListFormat(&b.Buffer, "err", err) } serialize.KVListFormat(&b.Buffer, keysAndValues...) - l.printDepth(s, logging.logger, nil, depth+1, &b.Buffer) + l.printDepth(s, nil, nil, depth+1, &b.Buffer) // Make the buffer available for reuse. 
buffer.PutBuffer(b) } @@ -873,6 +886,9 @@ func (l *loggingT) output(s severity.Severity, logger *logWriter, buf *buffer.Bu if logger.writeKlogBuffer != nil { logger.writeKlogBuffer(data) } else { + if len(data) > 0 && data[len(data)-1] == '\n' { + data = data[:len(data)-1] + } // TODO: set 'severity' and caller information as structured log info // keysAndValues := []interface{}{"severity", severityName[s], "file", file, "line", line} if s == severity.ErrorLog { @@ -897,7 +913,7 @@ func (l *loggingT) output(s severity.Severity, logger *logWriter, buf *buffer.Bu l.exit(err) } } - l.file[severity.InfoLog].Write(data) + _, _ = l.file[severity.InfoLog].Write(data) } else { if l.file[s] == nil { if err := l.createFiles(s); err != nil { @@ -907,20 +923,20 @@ func (l *loggingT) output(s severity.Severity, logger *logWriter, buf *buffer.Bu } if l.oneOutput { - l.file[s].Write(data) + _, _ = l.file[s].Write(data) } else { switch s { case severity.FatalLog: - l.file[severity.FatalLog].Write(data) + _, _ = l.file[severity.FatalLog].Write(data) fallthrough case severity.ErrorLog: - l.file[severity.ErrorLog].Write(data) + _, _ = l.file[severity.ErrorLog].Write(data) fallthrough case severity.WarningLog: - l.file[severity.WarningLog].Write(data) + _, _ = l.file[severity.WarningLog].Write(data) fallthrough case severity.InfoLog: - l.file[severity.InfoLog].Write(data) + _, _ = l.file[severity.InfoLog].Write(data) } } } @@ -946,7 +962,7 @@ func (l *loggingT) output(s severity.Severity, logger *logWriter, buf *buffer.Bu logExitFunc = func(error) {} // If we get a write error, we'll still exit below. for log := severity.FatalLog; log >= severity.InfoLog; log-- { if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. - f.Write(trace) + _, _ = f.Write(trace) } } l.mu.Unlock() @@ -1102,7 +1118,7 @@ const flushInterval = 5 * time.Second // flushDaemon periodically flushes the log file buffers. type flushDaemon struct { mu sync.Mutex - clock clock.WithTicker + clock clock.Clock flush func() stopC chan struct{} stopDone chan struct{} @@ -1110,7 +1126,7 @@ type flushDaemon struct { // newFlushDaemon returns a new flushDaemon. If the passed clock is nil, a // clock.RealClock is used. -func newFlushDaemon(flush func(), tickClock clock.WithTicker) *flushDaemon { +func newFlushDaemon(flush func(), tickClock clock.Clock) *flushDaemon { if tickClock == nil { tickClock = clock.RealClock{} } @@ -1201,8 +1217,8 @@ func (l *loggingT) flushAll() { for s := severity.FatalLog; s >= severity.InfoLog; s-- { file := l.file[s] if file != nil { - file.Flush() // ignore error - file.Sync() // ignore error + _ = file.Flush() // ignore error + _ = file.Sync() // ignore error } } if logging.loggerOptions.flush != nil { @@ -1281,9 +1297,7 @@ func (l *loggingT) setV(pc uintptr) Level { fn := runtime.FuncForPC(pc) file, _ := fn.FileLine(pc) // The file is something like /a/b/c/d.go. We want just the d. 
- if strings.HasSuffix(file, ".go") { - file = file[:len(file)-3] - } + file = strings.TrimSuffix(file, ".go") if slash := strings.LastIndex(file, "/"); slash >= 0 { file = file[slash+1:] } diff --git a/vendor/k8s.io/klog/v2/klog_file.go b/vendor/k8s.io/klog/v2/klog_file.go index 1025d644f3..8bee16204d 100644 --- a/vendor/k8s.io/klog/v2/klog_file.go +++ b/vendor/k8s.io/klog/v2/klog_file.go @@ -109,8 +109,8 @@ func create(tag string, t time.Time, startup bool) (f *os.File, filename string, f, err := openOrCreate(fname, startup) if err == nil { symlink := filepath.Join(dir, link) - os.Remove(symlink) // ignore err - os.Symlink(name, symlink) // ignore err + _ = os.Remove(symlink) // ignore err + _ = os.Symlink(name, symlink) // ignore err return f, fname, nil } lastErr = err diff --git a/vendor/k8s.io/klog/v2/klogr.go b/vendor/k8s.io/klog/v2/klogr.go index 15de00e21f..efec96fd45 100644 --- a/vendor/k8s.io/klog/v2/klogr.go +++ b/vendor/k8s.io/klog/v2/klogr.go @@ -22,6 +22,11 @@ import ( "k8s.io/klog/v2/internal/serialize" ) +const ( + // nameKey is used to log the `WithName` values as an additional attribute. + nameKey = "logger" +) + // NewKlogr returns a logger that is functionally identical to // klogr.NewWithOptions(klogr.FormatKlog), i.e. it passes through to klog. The // difference is that it uses a simpler implementation. @@ -32,10 +37,15 @@ func NewKlogr() Logger { // klogger is a subset of klogr/klogr.go. It had to be copied to break an // import cycle (klogr wants to use klog, and klog wants to use klogr). type klogger struct { - level int callDepth int - prefix string - values []interface{} + + // hasPrefix is true if the first entry in values is the special + // nameKey key/value. Such an entry gets added and later updated in + // WithName. + hasPrefix bool + + values []interface{} + groups string } func (l *klogger) Init(info logr.RuntimeInfo) { @@ -44,34 +54,40 @@ func (l *klogger) Init(info logr.RuntimeInfo) { func (l *klogger) Info(level int, msg string, kvList ...interface{}) { merged := serialize.MergeKVs(l.values, kvList) - if l.prefix != "" { - msg = l.prefix + ": " + msg - } // Skip this function. VDepth(l.callDepth+1, Level(level)).InfoSDepth(l.callDepth+1, msg, merged...) } func (l *klogger) Enabled(level int) bool { - // Skip this function and logr.Logger.Info where Enabled is called. - return VDepth(l.callDepth+2, Level(level)).Enabled() + return VDepth(l.callDepth+1, Level(level)).Enabled() } func (l *klogger) Error(err error, msg string, kvList ...interface{}) { merged := serialize.MergeKVs(l.values, kvList) - if l.prefix != "" { - msg = l.prefix + ": " + msg - } ErrorSDepth(l.callDepth+1, err, msg, merged...) } // WithName returns a new logr.Logger with the specified name appended. klogr -// uses '/' characters to separate name elements. Callers should not pass '/' +// uses '.' characters to separate name elements. Callers should not pass '.' // in the provided name string, but this library does not actually enforce that. func (l klogger) WithName(name string) logr.LogSink { - if len(l.prefix) > 0 { - l.prefix = l.prefix + "/" + if l.hasPrefix { + // Copy slice and modify value. No length checks and type + // assertions are needed because hasPrefix is only true if the + // first two elements exist and are key/value strings. + v := make([]interface{}, 0, len(l.values)) + v = append(v, l.values...) + prefix, _ := v[1].(string) + v[1] = prefix + "." + name + l.values = v + } else { + // Preprend new key/value pair. 
+ v := make([]interface{}, 0, 2+len(l.values)) + v = append(v, nameKey, name) + v = append(v, l.values...) + l.values = v + l.hasPrefix = true } - l.prefix += name return &l } diff --git a/vendor/k8s.io/klog/v2/klogr_slog.go b/vendor/k8s.io/klog/v2/klogr_slog.go new file mode 100644 index 0000000000..f7bf740306 --- /dev/null +++ b/vendor/k8s.io/klog/v2/klogr_slog.go @@ -0,0 +1,96 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package klog + +import ( + "context" + "log/slog" + "strconv" + "time" + + "github.com/go-logr/logr/slogr" + + "k8s.io/klog/v2/internal/buffer" + "k8s.io/klog/v2/internal/serialize" + "k8s.io/klog/v2/internal/severity" + "k8s.io/klog/v2/internal/sloghandler" +) + +func (l *klogger) Handle(ctx context.Context, record slog.Record) error { + if logging.logger != nil { + if slogSink, ok := logging.logger.GetSink().(slogr.SlogSink); ok { + // Let that logger do the work. + return slogSink.Handle(ctx, record) + } + } + + return sloghandler.Handle(ctx, record, l.groups, slogOutput) +} + +// slogOutput corresponds to several different functions in klog.go. +// It goes through some of the same checks and formatting steps before +// it ultimately converges by calling logging.printWithInfos. +func slogOutput(file string, line int, now time.Time, err error, s severity.Severity, msg string, kvList []interface{}) { + // See infoS. + if logging.logger != nil { + // Taking this path happens when klog has a logger installed + // as backend which doesn't support slog. Not good, we have to + // guess about the call depth and drop the actual location. + logger := logging.logger.WithCallDepth(2) + if s > severity.ErrorLog { + logger.Error(err, msg, kvList...) + } else { + logger.Info(msg, kvList...) + } + return + } + + // See printS. + b := buffer.GetBuffer() + b.WriteString(strconv.Quote(msg)) + if err != nil { + serialize.KVListFormat(&b.Buffer, "err", err) + } + serialize.KVListFormat(&b.Buffer, kvList...) + + // See print + header. + buf := logging.formatHeader(s, file, line, now) + logging.printWithInfos(buf, file, line, s, nil, nil, 0, &b.Buffer) + + buffer.PutBuffer(b) +} + +func (l *klogger) WithAttrs(attrs []slog.Attr) slogr.SlogSink { + clone := *l + clone.values = serialize.WithValues(l.values, sloghandler.Attrs2KVList(l.groups, attrs)) + return &clone +} + +func (l *klogger) WithGroup(name string) slogr.SlogSink { + clone := *l + if clone.groups != "" { + clone.groups += "." + name + } else { + clone.groups = name + } + return &clone +} + +var _ slogr.SlogSink = &klogger{} diff --git a/vendor/k8s.io/kube-openapi/pkg/builder3/util/util.go b/vendor/k8s.io/kube-openapi/pkg/builder3/util/util.go deleted file mode 100644 index e015669256..0000000000 --- a/vendor/k8s.io/kube-openapi/pkg/builder3/util/util.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
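To illustrate the klogr change above (WithName segments are now joined with '.' and emitted under the "logger" key instead of prefixing the message), a small sketch; the output in the comment is approximate and assumes the default klog text format.

package main

import "k8s.io/klog/v2"

func main() {
	// Previously this produced a "controller/sync: reconciled" message prefix;
	// with the change the name becomes a leading key/value pair instead.
	logger := klog.NewKlogr().WithName("controller").WithName("sync")

	// Roughly: "reconciled" logger="controller.sync" attempts=3
	logger.Info("reconciled", "attempts", 3)
}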
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "reflect" - - "k8s.io/kube-openapi/pkg/schemamutation" - "k8s.io/kube-openapi/pkg/validation/spec" -) - -// wrapRefs wraps OpenAPI V3 Schema refs that contain sibling elements. -// AllOf is used to wrap the Ref to prevent references from having sibling elements -// Please see https://github.com/kubernetes/kubernetes/issues/106387#issuecomment-967640388 -func WrapRefs(schema *spec.Schema) *spec.Schema { - walker := schemamutation.Walker{ - SchemaCallback: func(schema *spec.Schema) *spec.Schema { - orig := schema - clone := func() { - if orig == schema { - schema = new(spec.Schema) - *schema = *orig - } - } - if schema.Ref.String() != "" && !reflect.DeepEqual(*schema, spec.Schema{SchemaProps: spec.SchemaProps{Ref: schema.Ref}}) { - clone() - refSchema := new(spec.Schema) - refSchema.Ref = schema.Ref - schema.Ref = spec.Ref{} - schema.AllOf = []spec.Schema{*refSchema} - } - return schema - }, - RefCallback: schemamutation.RefCallbackNoop, - } - return walker.WalkSchema(schema) -} diff --git a/vendor/k8s.io/kube-openapi/pkg/cached/cache.go b/vendor/k8s.io/kube-openapi/pkg/cached/cache.go index 76415b7830..a66fe8a095 100644 --- a/vendor/k8s.io/kube-openapi/pkg/cached/cache.go +++ b/vendor/k8s.io/kube-openapi/pkg/cached/cache.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package cache provides a cache mechanism based on etags to lazily +// Package cached provides a cache mechanism based on etags to lazily // build, and/or cache results from expensive operation such that those // operations are not repeated unnecessarily. The operations can be // created as a tree, and replaced dynamically as needed. @@ -25,16 +25,18 @@ limitations under the License. // // This package uses a source/transform/sink model of caches to build // the dependency tree, and can be used as follows: -// - [NewSource]: A source cache that recomputes the content every time. -// - [NewStaticSource]: A source cache that always produces the +// - [Func]: A source cache that recomputes the content every time. +// - [Once]: A source cache that always produces the // same content, it is only called once. -// - [NewTransformer]: A cache that transforms data from one format to +// - [Transform]: A cache that transforms data from one format to // another. It's only refreshed when the source changes. -// - [NewMerger]: A cache that aggregates multiple caches into one. +// - [Merge]: A cache that aggregates multiple caches in a map into one. // It's only refreshed when the source changes. -// - [Replaceable]: A cache adapter that can be atomically -// replaced with a new one, and saves the previous results in case an -// error pops-up. +// - [MergeList]: A cache that aggregates multiple caches in a list into one. +// It's only refreshed when the source changes. +// - [Atomic]: A cache adapter that atomically replaces the source with a new one. +// - [LastSuccess]: A cache adapter that caches the last successful and returns +// it if the next call fails. It extends [Atomic]. 
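To make the cache primitives listed above concrete, a minimal usage sketch. buildSpec is a hypothetical stand-in for an expensive producer, and the calls follow the Func, Transform and LastSuccess definitions further down in this file.

package main

import (
	"crypto/sha256"
	"fmt"

	"k8s.io/kube-openapi/pkg/cached"
)

// buildSpec is a hypothetical, expensive producer of some document.
func buildSpec() []byte { return []byte(`{"openapi":"3.0.0"}`) }

func main() {
	// Func: a source that recomputes its content (and etag) on every Get.
	source := cached.Func(func() ([]byte, string, error) {
		data := buildSpec()
		return data, fmt.Sprintf("%X", sha256.Sum256(data)), nil
	})

	// Transform: only re-runs when the source etag changes or an error occurs.
	wrapped := cached.Transform(func(data []byte, etag string, err error) ([]byte, string, error) {
		if err != nil {
			return nil, "", err
		}
		return append([]byte("spec: "), data...), etag, nil
	}, source)

	// LastSuccess: a replaceable holder that keeps serving the last good
	// result if a later refresh fails.
	var cache cached.LastSuccess[[]byte]
	cache.Store(wrapped)

	data, etag, err := cache.Get()
	fmt.Println(string(data), etag, err)
}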
// // # Etags // @@ -54,61 +56,45 @@ import ( "sync/atomic" ) -// Result is the content returned from a call to a cache. It can either -// be created with [NewResultOK] if the call was a success, or -// [NewResultErr] if the call resulted in an error. +// Value is wrapping a value behind a getter for lazy evaluation. +type Value[T any] interface { + Get() (value T, etag string, err error) +} + +// Result is wrapping T and error into a struct for cases where a tuple is more +// convenient or necessary in Golang. type Result[T any] struct { - Data T - Etag string - Err error + Value T + Etag string + Err error } -// NewResultOK creates a new [Result] for a successful operation. -func NewResultOK[T any](data T, etag string) Result[T] { - return Result[T]{ - Data: data, - Etag: etag, - } +func (r Result[T]) Get() (T, string, error) { + return r.Value, r.Etag, r.Err } -// NewResultErr creates a new [Result] when an error has happened. -func NewResultErr[T any](err error) Result[T] { - return Result[T]{ - Err: err, - } +// Func wraps a (thread-safe) function as a Value[T]. +func Func[T any](fn func() (T, string, error)) Value[T] { + return valueFunc[T](fn) } -// Result can be treated as a [Data] if necessary. -func (r Result[T]) Get() Result[T] { - return r +type valueFunc[T any] func() (T, string, error) + +func (c valueFunc[T]) Get() (T, string, error) { + return c() } -// Data is a cache that performs an action whose result data will be -// cached. It also returns an "etag" identifier to version the cache, so -// that the caller can know if they have the most recent version of the -// cache (and can decide to cache some operation based on that). -// -// The [NewMerger] and [NewTransformer] automatically handle -// that for you by checking if the etag is updated before calling the -// merging or transforming function. -type Data[T any] interface { - // Returns the cached data, as well as an "etag" to identify the - // version of the cache, or an error if something happened. - Get() Result[T] +// Static returns constant values. +func Static[T any](value T, etag string) Value[T] { + return Result[T]{Value: value, Etag: etag} } -// NewMerger creates a new merge cache, a cache that merges the result -// of other caches. The function only gets called if any of the -// dependency has changed. +// Merge merges a of cached values. The merge function only gets called if any of +// the dependency has changed. // // If any of the dependency returned an error before, or any of the // dependency returned an error this time, or if the mergeFn failed -// before, then the function is reran. -// -// The caches and results are mapped by K so that associated data can be -// retrieved. The map of dependencies can not be modified after -// creation, and a new merger should be created (and probably replaced -// using a [Replaceable]). +// before, then the function is run again. // // Note that this assumes there is no "partial" merge, the merge // function will remerge all the dependencies together everytime. Since @@ -118,18 +104,19 @@ type Data[T any] interface { // Also note that Golang map iteration is not stable. If the mergeFn // depends on the order iteration to be stable, it will need to // implement its own sorting or iteration order. 
-func NewMerger[K comparable, T, V any](mergeFn func(results map[K]Result[T]) Result[V], caches map[K]Data[T]) Data[V] { - listCaches := make([]Data[T], 0, len(caches)) - // maps from index to key +func Merge[K comparable, T, V any](mergeFn func(results map[K]Result[T]) (V, string, error), caches map[K]Value[T]) Value[V] { + list := make([]Value[T], 0, len(caches)) + + // map from index to key indexes := make(map[int]K, len(caches)) i := 0 for k := range caches { - listCaches = append(listCaches, caches[k]) + list = append(list, caches[k]) indexes[i] = k i++ } - return NewListMerger(func(results []Result[T]) Result[V] { + return MergeList(func(results []Result[T]) (V, string, error) { if len(results) != len(indexes) { panic(fmt.Errorf("invalid result length %d, expected %d", len(results), len(indexes))) } @@ -138,20 +125,11 @@ func NewMerger[K comparable, T, V any](mergeFn func(results map[K]Result[T]) Res m[indexes[i]] = results[i] } return mergeFn(m) - }, listCaches) -} - -type listMerger[T, V any] struct { - lock sync.Mutex - mergeFn func([]Result[T]) Result[V] - caches []Data[T] - cacheResults []Result[T] - result Result[V] + }, list) } -// NewListMerger creates a new merge cache that merges the results of -// other caches in list form. The function only gets called if any of -// the dependency has changed. +// MergeList merges a list of cached values. The function only gets called if +// any of the dependency has changed. // // The benefit of ListMerger over the basic Merger is that caches are // stored in an ordered list so the order of the cache will be @@ -165,31 +143,37 @@ type listMerger[T, V any] struct { // function will remerge all the dependencies together everytime. Since // the list of dependencies is constant, there is no way to save some // partial merge information either. 
-func NewListMerger[T, V any](mergeFn func(results []Result[T]) Result[V], caches []Data[T]) Data[V] { +func MergeList[T, V any](mergeFn func(results []Result[T]) (V, string, error), delegates []Value[T]) Value[V] { return &listMerger[T, V]{ - mergeFn: mergeFn, - caches: caches, + mergeFn: mergeFn, + delegates: delegates, } } +type listMerger[T, V any] struct { + lock sync.Mutex + mergeFn func([]Result[T]) (V, string, error) + delegates []Value[T] + cache []Result[T] + result Result[V] +} + func (c *listMerger[T, V]) prepareResultsLocked() []Result[T] { - cacheResults := make([]Result[T], len(c.caches)) + cacheResults := make([]Result[T], len(c.delegates)) ch := make(chan struct { int Result[T] - }, len(c.caches)) - for i := range c.caches { + }, len(c.delegates)) + for i := range c.delegates { go func(index int) { + value, etag, err := c.delegates[index].Get() ch <- struct { int Result[T] - }{ - index, - c.caches[index].Get(), - } + }{index, Result[T]{Value: value, Etag: etag, Err: err}} }(i) } - for i := 0; i < len(c.caches); i++ { + for i := 0; i < len(c.delegates); i++ { res := <-ch cacheResults[res.int] = res.Result } @@ -197,16 +181,16 @@ func (c *listMerger[T, V]) prepareResultsLocked() []Result[T] { } func (c *listMerger[T, V]) needsRunningLocked(results []Result[T]) bool { - if c.cacheResults == nil { + if c.cache == nil { return true } if c.result.Err != nil { return true } - if len(results) != len(c.cacheResults) { - panic(fmt.Errorf("invalid number of results: %v (expected %v)", len(results), len(c.cacheResults))) + if len(results) != len(c.cache) { + panic(fmt.Errorf("invalid number of results: %v (expected %v)", len(results), len(c.cache))) } - for i, oldResult := range c.cacheResults { + for i, oldResult := range c.cache { newResult := results[i] if newResult.Etag != oldResult.Etag || newResult.Err != nil || oldResult.Err != nil { return true @@ -215,98 +199,92 @@ func (c *listMerger[T, V]) needsRunningLocked(results []Result[T]) bool { return false } -func (c *listMerger[T, V]) Get() Result[V] { +func (c *listMerger[T, V]) Get() (V, string, error) { c.lock.Lock() defer c.lock.Unlock() cacheResults := c.prepareResultsLocked() if c.needsRunningLocked(cacheResults) { - c.cacheResults = cacheResults - c.result = c.mergeFn(c.cacheResults) + c.cache = cacheResults + c.result.Value, c.result.Etag, c.result.Err = c.mergeFn(c.cache) } - return c.result + return c.result.Value, c.result.Etag, c.result.Err } -// NewTransformer creates a new cache that transforms the result of -// another cache. The transformFn will only be called if the source -// cache has updated the output, otherwise, the cached result will be -// returned. +// Transform the result of another cached value. The transformFn will only be called +// if the source has updated, otherwise, the result will be returned. // // If the dependency returned an error before, or it returns an error // this time, or if the transformerFn failed before, the function is // reran. 
-func NewTransformer[T, V any](transformerFn func(Result[T]) Result[V], source Data[T]) Data[V] { - return NewListMerger(func(caches []Result[T]) Result[V] { - if len(caches) != 1 { - panic(fmt.Errorf("invalid cache for transformer cache: %v", caches)) +func Transform[T, V any](transformerFn func(T, string, error) (V, string, error), source Value[T]) Value[V] { + return MergeList(func(delegates []Result[T]) (V, string, error) { + if len(delegates) != 1 { + panic(fmt.Errorf("invalid cache for transformer cache: %v", delegates)) } - return transformerFn(caches[0]) - }, []Data[T]{source}) -} - -// NewSource creates a new cache that generates some data. This -// will always be called since we don't know the origin of the data and -// if it needs to be updated or not. sourceFn MUST be thread-safe. -func NewSource[T any](sourceFn func() Result[T]) Data[T] { - c := source[T](sourceFn) - return &c + return transformerFn(delegates[0].Value, delegates[0].Etag, delegates[0].Err) + }, []Value[T]{source}) } -type source[T any] func() Result[T] - -func (c *source[T]) Get() Result[T] { - return (*c)() -} - -// NewStaticSource creates a new cache that always generates the -// same data. This will only be called once (lazily). -func NewStaticSource[T any](staticFn func() Result[T]) Data[T] { - return &static[T]{ - fn: staticFn, +// Once calls Value[T].Get() lazily and only once, even in case of an error result. +func Once[T any](d Value[T]) Value[T] { + return &once[T]{ + data: d, } } -type static[T any] struct { +type once[T any] struct { once sync.Once - fn func() Result[T] + data Value[T] result Result[T] } -func (c *static[T]) Get() Result[T] { +func (c *once[T]) Get() (T, string, error) { c.once.Do(func() { - c.result = c.fn() + c.result.Value, c.result.Etag, c.result.Err = c.data.Get() }) - return c.result + return c.result.Value, c.result.Etag, c.result.Err } -// Replaceable is a cache that carries the result even when the cache is -// replaced. This is the type that should typically be stored in -// structs. -type Replaceable[T any] struct { - cache atomic.Pointer[Data[T]] - result atomic.Pointer[Result[T]] +// Replaceable extends the Value[T] interface with the ability to change the +// underlying Value[T] after construction. +type Replaceable[T any] interface { + Value[T] + Store(Value[T]) } -// Get retrieves the data from the underlying source. [Replaceable] -// implements the [Data] interface itself. This is a pass-through -// that calls the most recent underlying cache. If the cache fails but -// previously had returned a success, that success will be returned -// instead. If the cache fails but we never returned a success, that -// failure is returned. -func (c *Replaceable[T]) Get() Result[T] { - result := (*c.cache.Load()).Get() - - for { - cResult := c.result.Load() - if result.Err != nil && cResult != nil && cResult.Err == nil { - return *cResult - } - if c.result.CompareAndSwap(cResult, &result) { - return result +// Atomic wraps a Value[T] as an atomic value that can be replaced. It implements +// Replaceable[T]. +type Atomic[T any] struct { + value atomic.Pointer[Value[T]] +} + +var _ Replaceable[[]byte] = &Atomic[[]byte]{} + +func (x *Atomic[T]) Store(val Value[T]) { x.value.Store(&val) } +func (x *Atomic[T]) Get() (T, string, error) { return (*x.value.Load()).Get() } + +// LastSuccess calls Value[T].Get(), but hides errors by returning the last +// success if there has been any. 
+type LastSuccess[T any] struct { + Atomic[T] + success atomic.Pointer[Result[T]] +} + +var _ Replaceable[[]byte] = &LastSuccess[[]byte]{} + +func (c *LastSuccess[T]) Get() (T, string, error) { + success := c.success.Load() + value, etag, err := c.Atomic.Get() + if err == nil { + if success == nil { + c.success.CompareAndSwap(nil, &Result[T]{Value: value, Etag: etag, Err: err}) } + return value, etag, err + } + + if success != nil { + return success.Value, success.Etag, success.Err } -} -// Replace changes the cache. -func (c *Replaceable[T]) Replace(cache Data[T]) { - c.cache.Swap(&cache) + return value, etag, err } diff --git a/vendor/k8s.io/kube-openapi/pkg/common/common.go b/vendor/k8s.io/kube-openapi/pkg/common/common.go index 1a6c12e17a..2e15e163c5 100644 --- a/vendor/k8s.io/kube-openapi/pkg/common/common.go +++ b/vendor/k8s.io/kube-openapi/pkg/common/common.go @@ -22,7 +22,6 @@ import ( "github.com/emicklei/go-restful/v3" - "k8s.io/kube-openapi/pkg/openapiconv" "k8s.io/kube-openapi/pkg/spec3" "k8s.io/kube-openapi/pkg/validation/spec" ) @@ -172,43 +171,6 @@ type OpenAPIV3Config struct { DefaultSecurity []map[string][]string } -// ConvertConfigToV3 converts a Config object to an OpenAPIV3Config object -func ConvertConfigToV3(config *Config) *OpenAPIV3Config { - if config == nil { - return nil - } - - v3Config := &OpenAPIV3Config{ - Info: config.Info, - IgnorePrefixes: config.IgnorePrefixes, - GetDefinitions: config.GetDefinitions, - GetOperationIDAndTags: config.GetOperationIDAndTags, - GetOperationIDAndTagsFromRoute: config.GetOperationIDAndTagsFromRoute, - GetDefinitionName: config.GetDefinitionName, - Definitions: config.Definitions, - SecuritySchemes: make(spec3.SecuritySchemes), - DefaultSecurity: config.DefaultSecurity, - DefaultResponse: openapiconv.ConvertResponse(config.DefaultResponse, []string{"application/json"}), - - CommonResponses: make(map[int]*spec3.Response), - ResponseDefinitions: make(map[string]*spec3.Response), - } - - if config.SecurityDefinitions != nil { - for s, securityScheme := range *config.SecurityDefinitions { - v3Config.SecuritySchemes[s] = openapiconv.ConvertSecurityScheme(securityScheme) - } - } - for k, commonResponse := range config.CommonResponses { - v3Config.CommonResponses[k] = openapiconv.ConvertResponse(&commonResponse, []string{"application/json"}) - } - - for k, responseDefinition := range config.ResponseDefinitions { - v3Config.ResponseDefinitions[k] = openapiconv.ConvertResponse(&responseDefinition, []string{"application/json"}) - } - return v3Config -} - type typeInfo struct { name string format string diff --git a/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go b/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go index 2263e2f32b..fc45634887 100644 --- a/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go +++ b/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go @@ -33,6 +33,7 @@ import ( openapi_v3 "github.com/google/gnostic-models/openapiv3" "github.com/google/uuid" "github.com/munnerz/goautoneg" + "k8s.io/klog/v2" "k8s.io/kube-openapi/pkg/cached" "k8s.io/kube-openapi/pkg/common" @@ -73,38 +74,38 @@ type timedSpec struct { // This type is protected by the lock on OpenAPIService. 
type openAPIV3Group struct { - specCache cached.Replaceable[*spec3.OpenAPI] - pbCache cached.Data[timedSpec] - jsonCache cached.Data[timedSpec] + specCache cached.LastSuccess[*spec3.OpenAPI] + pbCache cached.Value[timedSpec] + jsonCache cached.Value[timedSpec] } func newOpenAPIV3Group() *openAPIV3Group { o := &openAPIV3Group{} - o.jsonCache = cached.NewTransformer[*spec3.OpenAPI](func(result cached.Result[*spec3.OpenAPI]) cached.Result[timedSpec] { - if result.Err != nil { - return cached.NewResultErr[timedSpec](result.Err) + o.jsonCache = cached.Transform[*spec3.OpenAPI](func(spec *spec3.OpenAPI, etag string, err error) (timedSpec, string, error) { + if err != nil { + return timedSpec{}, "", err } - json, err := json.Marshal(result.Data) + json, err := json.Marshal(spec) if err != nil { - return cached.NewResultErr[timedSpec](err) + return timedSpec{}, "", err } - return cached.NewResultOK(timedSpec{spec: json, lastModified: time.Now()}, computeETag(json)) + return timedSpec{spec: json, lastModified: time.Now()}, computeETag(json), nil }, &o.specCache) - o.pbCache = cached.NewTransformer(func(result cached.Result[timedSpec]) cached.Result[timedSpec] { - if result.Err != nil { - return cached.NewResultErr[timedSpec](result.Err) + o.pbCache = cached.Transform(func(ts timedSpec, etag string, err error) (timedSpec, string, error) { + if err != nil { + return timedSpec{}, "", err } - proto, err := ToV3ProtoBinary(result.Data.spec) + proto, err := ToV3ProtoBinary(ts.spec) if err != nil { - return cached.NewResultErr[timedSpec](err) + return timedSpec{}, "", err } - return cached.NewResultOK(timedSpec{spec: proto, lastModified: result.Data.lastModified}, result.Etag) + return timedSpec{spec: proto, lastModified: ts.lastModified}, etag, nil }, o.jsonCache) return o } -func (o *openAPIV3Group) UpdateSpec(openapi cached.Data[*spec3.OpenAPI]) { - o.specCache.Replace(openapi) +func (o *openAPIV3Group) UpdateSpec(openapi cached.Value[*spec3.OpenAPI]) { + o.specCache.Store(openapi) } // OpenAPIService is the service responsible for serving OpenAPI spec. It has @@ -114,7 +115,7 @@ type OpenAPIService struct { mutex sync.Mutex v3Schema map[string]*openAPIV3Group - discoveryCache cached.Replaceable[timedSpec] + discoveryCache cached.LastSuccess[timedSpec] } func computeETag(data []byte) string { @@ -137,20 +138,20 @@ func NewOpenAPIService() *OpenAPIService { o := &OpenAPIService{} o.v3Schema = make(map[string]*openAPIV3Group) // We're not locked because we haven't shared the structure yet. 
- o.discoveryCache.Replace(o.buildDiscoveryCacheLocked()) + o.discoveryCache.Store(o.buildDiscoveryCacheLocked()) return o } -func (o *OpenAPIService) buildDiscoveryCacheLocked() cached.Data[timedSpec] { - caches := make(map[string]cached.Data[timedSpec], len(o.v3Schema)) +func (o *OpenAPIService) buildDiscoveryCacheLocked() cached.Value[timedSpec] { + caches := make(map[string]cached.Value[timedSpec], len(o.v3Schema)) for gvName, group := range o.v3Schema { caches[gvName] = group.jsonCache } - return cached.NewMerger(func(results map[string]cached.Result[timedSpec]) cached.Result[timedSpec] { + return cached.Merge(func(results map[string]cached.Result[timedSpec]) (timedSpec, string, error) { discovery := &OpenAPIV3Discovery{Paths: make(map[string]OpenAPIV3DiscoveryGroupVersion)} for gvName, result := range results { if result.Err != nil { - return cached.NewResultErr[timedSpec](result.Err) + return timedSpec{}, "", result.Err } discovery.Paths[gvName] = OpenAPIV3DiscoveryGroupVersion{ ServerRelativeURL: constructServerRelativeURL(gvName, result.Etag), @@ -158,9 +159,9 @@ func (o *OpenAPIService) buildDiscoveryCacheLocked() cached.Data[timedSpec] { } j, err := json.Marshal(discovery) if err != nil { - return cached.NewResultErr[timedSpec](err) + return timedSpec{}, "", err } - return cached.NewResultOK(timedSpec{spec: j, lastModified: time.Now()}, computeETag(j)) + return timedSpec{spec: j, lastModified: time.Now()}, computeETag(j), nil }, caches) } @@ -171,32 +172,32 @@ func (o *OpenAPIService) getSingleGroupBytes(getType string, group string) ([]by if !ok { return nil, "", time.Now(), fmt.Errorf("Cannot find CRD group %s", group) } - result := cached.Result[timedSpec]{} switch getType { case subTypeJSON: - result = v.jsonCache.Get() + ts, etag, err := v.jsonCache.Get() + return ts.spec, etag, ts.lastModified, err case subTypeProtobuf, subTypeProtobufDeprecated: - result = v.pbCache.Get() + ts, etag, err := v.pbCache.Get() + return ts.spec, etag, ts.lastModified, err default: return nil, "", time.Now(), fmt.Errorf("Invalid accept clause %s", getType) } - return result.Data.spec, result.Etag, result.Data.lastModified, result.Err } // UpdateGroupVersionLazy adds or updates an existing group with the new cached. -func (o *OpenAPIService) UpdateGroupVersionLazy(group string, openapi cached.Data[*spec3.OpenAPI]) { +func (o *OpenAPIService) UpdateGroupVersionLazy(group string, openapi cached.Value[*spec3.OpenAPI]) { o.mutex.Lock() defer o.mutex.Unlock() if _, ok := o.v3Schema[group]; !ok { o.v3Schema[group] = newOpenAPIV3Group() // Since there is a new item, we need to re-build the cache map. - o.discoveryCache.Replace(o.buildDiscoveryCacheLocked()) + o.discoveryCache.Store(o.buildDiscoveryCacheLocked()) } o.v3Schema[group].UpdateSpec(openapi) } func (o *OpenAPIService) UpdateGroupVersion(group string, openapi *spec3.OpenAPI) { - o.UpdateGroupVersionLazy(group, cached.NewResultOK(openapi, uuid.New().String())) + o.UpdateGroupVersionLazy(group, cached.Static(openapi, uuid.New().String())) } func (o *OpenAPIService) DeleteGroupVersion(group string) { @@ -204,19 +205,19 @@ func (o *OpenAPIService) DeleteGroupVersion(group string) { defer o.mutex.Unlock() delete(o.v3Schema, group) // Rebuild the merge cache map since the items have changed. 
- o.discoveryCache.Replace(o.buildDiscoveryCacheLocked()) + o.discoveryCache.Store(o.buildDiscoveryCacheLocked()) } func (o *OpenAPIService) HandleDiscovery(w http.ResponseWriter, r *http.Request) { - result := o.discoveryCache.Get() - if result.Err != nil { - klog.Errorf("Error serving discovery: %s", result.Err) + ts, etag, err := o.discoveryCache.Get() + if err != nil { + klog.Errorf("Error serving discovery: %s", err) w.WriteHeader(http.StatusInternalServerError) return } - w.Header().Set("Etag", strconv.Quote(result.Etag)) + w.Header().Set("Etag", strconv.Quote(etag)) w.Header().Set("Content-Type", "application/json") - http.ServeContent(w, r, "/openapi/v3", result.Data.lastModified, bytes.NewReader(result.Data.spec)) + http.ServeContent(w, r, "/openapi/v3", ts.lastModified, bytes.NewReader(ts.spec)) } func (o *OpenAPIService) HandleGroupVersion(w http.ResponseWriter, r *http.Request) { diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/flags.go b/vendor/k8s.io/kube-openapi/pkg/internal/flags.go index bef6037823..da5485f6a6 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/flags.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/flags.go @@ -22,3 +22,4 @@ var UseOptimizedJSONUnmarshalingV3 bool = true // Used by tests to selectively disable experimental JSON marshaler var UseOptimizedJSONMarshaling bool = true +var UseOptimizedJSONMarshalingV3 bool = true diff --git a/vendor/k8s.io/kube-openapi/pkg/openapiconv/convert.go b/vendor/k8s.io/kube-openapi/pkg/openapiconv/convert.go deleted file mode 100644 index e993fe23d5..0000000000 --- a/vendor/k8s.io/kube-openapi/pkg/openapiconv/convert.go +++ /dev/null @@ -1,322 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package openapiconv - -import ( - "strings" - - klog "k8s.io/klog/v2" - builderutil "k8s.io/kube-openapi/pkg/builder3/util" - "k8s.io/kube-openapi/pkg/spec3" - "k8s.io/kube-openapi/pkg/validation/spec" -) - -var OpenAPIV2DefPrefix = "#/definitions/" -var OpenAPIV3DefPrefix = "#/components/schemas/" - -// ConvertV2ToV3 converts an OpenAPI V2 object into V3. -// Certain references may be shared between the V2 and V3 objects in the conversion. 
-func ConvertV2ToV3(v2Spec *spec.Swagger) *spec3.OpenAPI { - v3Spec := &spec3.OpenAPI{ - Version: "3.0.0", - Info: v2Spec.Info, - ExternalDocs: ConvertExternalDocumentation(v2Spec.ExternalDocs), - Paths: ConvertPaths(v2Spec.Paths), - Components: ConvertComponents(v2Spec.SecurityDefinitions, v2Spec.Definitions, v2Spec.Responses, v2Spec.Produces), - } - - return v3Spec -} - -func ConvertExternalDocumentation(v2ED *spec.ExternalDocumentation) *spec3.ExternalDocumentation { - if v2ED == nil { - return nil - } - return &spec3.ExternalDocumentation{ - ExternalDocumentationProps: spec3.ExternalDocumentationProps{ - Description: v2ED.Description, - URL: v2ED.URL, - }, - } -} - -func ConvertComponents(v2SecurityDefinitions spec.SecurityDefinitions, v2Definitions spec.Definitions, v2Responses map[string]spec.Response, produces []string) *spec3.Components { - components := &spec3.Components{} - - if v2Definitions != nil { - components.Schemas = make(map[string]*spec.Schema) - } - for s, schema := range v2Definitions { - components.Schemas[s] = ConvertSchema(&schema) - } - if v2SecurityDefinitions != nil { - components.SecuritySchemes = make(spec3.SecuritySchemes) - } - for s, securityScheme := range v2SecurityDefinitions { - components.SecuritySchemes[s] = ConvertSecurityScheme(securityScheme) - } - if v2Responses != nil { - components.Responses = make(map[string]*spec3.Response) - } - for r, response := range v2Responses { - components.Responses[r] = ConvertResponse(&response, produces) - } - - return components -} - -func ConvertSchema(v2Schema *spec.Schema) *spec.Schema { - if v2Schema == nil { - return nil - } - v3Schema := spec.Schema{ - VendorExtensible: v2Schema.VendorExtensible, - SchemaProps: v2Schema.SchemaProps, - SwaggerSchemaProps: v2Schema.SwaggerSchemaProps, - ExtraProps: v2Schema.ExtraProps, - } - - if refString := v2Schema.Ref.String(); refString != "" { - if idx := strings.Index(refString, OpenAPIV2DefPrefix); idx != -1 { - v3Schema.Ref = spec.MustCreateRef(OpenAPIV3DefPrefix + refString[idx+len(OpenAPIV2DefPrefix):]) - } else { - klog.Errorf("Error: Swagger V2 Ref %s does not contain #/definitions\n", refString) - } - } - - if v2Schema.Properties != nil { - v3Schema.Properties = make(map[string]spec.Schema) - for key, property := range v2Schema.Properties { - v3Schema.Properties[key] = *ConvertSchema(&property) - } - } - if v2Schema.Items != nil { - v3Schema.Items = &spec.SchemaOrArray{ - Schema: ConvertSchema(v2Schema.Items.Schema), - Schemas: ConvertSchemaList(v2Schema.Items.Schemas), - } - } - - if v2Schema.AdditionalProperties != nil { - v3Schema.AdditionalProperties = &spec.SchemaOrBool{ - Schema: ConvertSchema(v2Schema.AdditionalProperties.Schema), - Allows: v2Schema.AdditionalProperties.Allows, - } - } - if v2Schema.AdditionalItems != nil { - v3Schema.AdditionalItems = &spec.SchemaOrBool{ - Schema: ConvertSchema(v2Schema.AdditionalItems.Schema), - Allows: v2Schema.AdditionalItems.Allows, - } - } - - return builderutil.WrapRefs(&v3Schema) -} - -func ConvertSchemaList(v2SchemaList []spec.Schema) []spec.Schema { - if v2SchemaList == nil { - return nil - } - v3SchemaList := []spec.Schema{} - for _, s := range v2SchemaList { - v3SchemaList = append(v3SchemaList, *ConvertSchema(&s)) - } - return v3SchemaList -} - -func ConvertSecurityScheme(v2securityScheme *spec.SecurityScheme) *spec3.SecurityScheme { - if v2securityScheme == nil { - return nil - } - securityScheme := &spec3.SecurityScheme{ - VendorExtensible: v2securityScheme.VendorExtensible, - SecuritySchemeProps: 
spec3.SecuritySchemeProps{ - Description: v2securityScheme.Description, - Type: v2securityScheme.Type, - Name: v2securityScheme.Name, - In: v2securityScheme.In, - }, - } - - if v2securityScheme.Flow != "" { - securityScheme.Flows = make(map[string]*spec3.OAuthFlow) - securityScheme.Flows[v2securityScheme.Flow] = &spec3.OAuthFlow{ - OAuthFlowProps: spec3.OAuthFlowProps{ - AuthorizationUrl: v2securityScheme.AuthorizationURL, - TokenUrl: v2securityScheme.TokenURL, - Scopes: v2securityScheme.Scopes, - }, - } - } - return securityScheme -} - -func ConvertPaths(v2Paths *spec.Paths) *spec3.Paths { - if v2Paths == nil { - return nil - } - paths := &spec3.Paths{ - VendorExtensible: v2Paths.VendorExtensible, - } - - if v2Paths.Paths != nil { - paths.Paths = make(map[string]*spec3.Path) - } - for k, v := range v2Paths.Paths { - paths.Paths[k] = ConvertPathItem(v) - } - return paths -} - -func ConvertPathItem(v2pathItem spec.PathItem) *spec3.Path { - path := &spec3.Path{ - Refable: v2pathItem.Refable, - PathProps: spec3.PathProps{ - Get: ConvertOperation(v2pathItem.Get), - Put: ConvertOperation(v2pathItem.Put), - Post: ConvertOperation(v2pathItem.Post), - Delete: ConvertOperation(v2pathItem.Delete), - Options: ConvertOperation(v2pathItem.Options), - Head: ConvertOperation(v2pathItem.Head), - Patch: ConvertOperation(v2pathItem.Patch), - }, - VendorExtensible: v2pathItem.VendorExtensible, - } - for _, param := range v2pathItem.Parameters { - path.Parameters = append(path.Parameters, ConvertParameter(param)) - } - return path -} - -func ConvertOperation(v2Operation *spec.Operation) *spec3.Operation { - if v2Operation == nil { - return nil - } - operation := &spec3.Operation{ - VendorExtensible: v2Operation.VendorExtensible, - OperationProps: spec3.OperationProps{ - Description: v2Operation.Description, - ExternalDocs: ConvertExternalDocumentation(v2Operation.OperationProps.ExternalDocs), - Tags: v2Operation.Tags, - Summary: v2Operation.Summary, - Deprecated: v2Operation.Deprecated, - OperationId: v2Operation.ID, - }, - } - - for _, param := range v2Operation.Parameters { - if param.ParamProps.Name == "body" && param.ParamProps.Schema != nil { - operation.OperationProps.RequestBody = &spec3.RequestBody{ - RequestBodyProps: spec3.RequestBodyProps{}, - } - if v2Operation.Consumes != nil { - operation.RequestBody.Content = make(map[string]*spec3.MediaType) - } - for _, consumer := range v2Operation.Consumes { - operation.RequestBody.Content[consumer] = &spec3.MediaType{ - MediaTypeProps: spec3.MediaTypeProps{ - Schema: ConvertSchema(param.ParamProps.Schema), - }, - } - } - } else { - operation.Parameters = append(operation.Parameters, ConvertParameter(param)) - } - } - - operation.Responses = &spec3.Responses{ResponsesProps: spec3.ResponsesProps{ - Default: ConvertResponse(v2Operation.Responses.Default, v2Operation.Produces), - }, - VendorExtensible: v2Operation.Responses.VendorExtensible, - } - - if v2Operation.Responses.StatusCodeResponses != nil { - operation.Responses.StatusCodeResponses = make(map[int]*spec3.Response) - } - for k, v := range v2Operation.Responses.StatusCodeResponses { - operation.Responses.StatusCodeResponses[k] = ConvertResponse(&v, v2Operation.Produces) - } - return operation -} - -func ConvertResponse(v2Response *spec.Response, produces []string) *spec3.Response { - if v2Response == nil { - return nil - } - response := &spec3.Response{ - Refable: ConvertRefableResponse(v2Response.Refable), - VendorExtensible: v2Response.VendorExtensible, - ResponseProps: spec3.ResponseProps{ - 
Description: v2Response.Description, - }, - } - - if v2Response.Schema != nil { - if produces != nil { - response.Content = make(map[string]*spec3.MediaType) - } - for _, producer := range produces { - response.ResponseProps.Content[producer] = &spec3.MediaType{ - MediaTypeProps: spec3.MediaTypeProps{ - Schema: ConvertSchema(v2Response.Schema), - }, - } - } - } - return response -} - -func ConvertParameter(v2Param spec.Parameter) *spec3.Parameter { - param := &spec3.Parameter{ - Refable: ConvertRefableParameter(v2Param.Refable), - VendorExtensible: v2Param.VendorExtensible, - ParameterProps: spec3.ParameterProps{ - Name: v2Param.Name, - Description: v2Param.Description, - In: v2Param.In, - Required: v2Param.Required, - Schema: ConvertSchema(v2Param.Schema), - AllowEmptyValue: v2Param.AllowEmptyValue, - }, - } - // Convert SimpleSchema into Schema - if param.Schema == nil { - param.Schema = &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{v2Param.Type}, - Format: v2Param.Format, - UniqueItems: v2Param.UniqueItems, - }, - } - } - - return param -} - -func ConvertRefableParameter(refable spec.Refable) spec.Refable { - if refable.Ref.String() != "" { - return spec.Refable{Ref: spec.MustCreateRef(strings.Replace(refable.Ref.String(), "#/parameters/", "#/components/parameters/", 1))} - } - return refable -} - -func ConvertRefableResponse(refable spec.Refable) spec.Refable { - if refable.Ref.String() != "" { - return spec.Refable{Ref: spec.MustCreateRef(strings.Replace(refable.Ref.String(), "#/responses/", "#/components/responses/", 1))} - } - return refable -} diff --git a/vendor/k8s.io/kube-openapi/pkg/schemamutation/walker.go b/vendor/k8s.io/kube-openapi/pkg/schemamutation/walker.go deleted file mode 100644 index 3fac658e3c..0000000000 --- a/vendor/k8s.io/kube-openapi/pkg/schemamutation/walker.go +++ /dev/null @@ -1,519 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package schemamutation - -import ( - "k8s.io/kube-openapi/pkg/validation/spec" -) - -// Walker runs callback functions on all references of an OpenAPI spec, -// replacing the values when visiting corresponding types. -type Walker struct { - // SchemaCallback will be called on each schema, taking the original schema, - // and before any other callbacks of the Walker. - // If the schema needs to be mutated, DO NOT mutate it in-place, - // always create a copy, mutate, and return it. - SchemaCallback func(schema *spec.Schema) *spec.Schema - - // RefCallback will be called on each ref. - // If the ref needs to be mutated, DO NOT mutate it in-place, - // always create a copy, mutate, and return it. 
- RefCallback func(ref *spec.Ref) *spec.Ref -} - -type SchemaCallbackFunc func(schema *spec.Schema) *spec.Schema -type RefCallbackFunc func(ref *spec.Ref) *spec.Ref - -var SchemaCallBackNoop SchemaCallbackFunc = func(schema *spec.Schema) *spec.Schema { - return schema -} -var RefCallbackNoop RefCallbackFunc = func(ref *spec.Ref) *spec.Ref { - return ref -} - -// ReplaceReferences rewrites the references without mutating the input. -// The output might share data with the input. -func ReplaceReferences(walkRef func(ref *spec.Ref) *spec.Ref, sp *spec.Swagger) *spec.Swagger { - walker := &Walker{RefCallback: walkRef, SchemaCallback: SchemaCallBackNoop} - return walker.WalkRoot(sp) -} - -func (w *Walker) WalkSchema(schema *spec.Schema) *spec.Schema { - if schema == nil { - return nil - } - - orig := schema - clone := func() { - if orig == schema { - schema = &spec.Schema{} - *schema = *orig - } - } - - // Always run callback on the whole schema first - // so that SchemaCallback can take the original schema as input. - schema = w.SchemaCallback(schema) - - if r := w.RefCallback(&schema.Ref); r != &schema.Ref { - clone() - schema.Ref = *r - } - - definitionsCloned := false - for k, v := range schema.Definitions { - if s := w.WalkSchema(&v); s != &v { - if !definitionsCloned { - definitionsCloned = true - clone() - schema.Definitions = make(spec.Definitions, len(orig.Definitions)) - for k2, v2 := range orig.Definitions { - schema.Definitions[k2] = v2 - } - } - schema.Definitions[k] = *s - } - } - - propertiesCloned := false - for k, v := range schema.Properties { - if s := w.WalkSchema(&v); s != &v { - if !propertiesCloned { - propertiesCloned = true - clone() - schema.Properties = make(map[string]spec.Schema, len(orig.Properties)) - for k2, v2 := range orig.Properties { - schema.Properties[k2] = v2 - } - } - schema.Properties[k] = *s - } - } - - patternPropertiesCloned := false - for k, v := range schema.PatternProperties { - if s := w.WalkSchema(&v); s != &v { - if !patternPropertiesCloned { - patternPropertiesCloned = true - clone() - schema.PatternProperties = make(map[string]spec.Schema, len(orig.PatternProperties)) - for k2, v2 := range orig.PatternProperties { - schema.PatternProperties[k2] = v2 - } - } - schema.PatternProperties[k] = *s - } - } - - allOfCloned := false - for i := range schema.AllOf { - if s := w.WalkSchema(&schema.AllOf[i]); s != &schema.AllOf[i] { - if !allOfCloned { - allOfCloned = true - clone() - schema.AllOf = make([]spec.Schema, len(orig.AllOf)) - copy(schema.AllOf, orig.AllOf) - } - schema.AllOf[i] = *s - } - } - - anyOfCloned := false - for i := range schema.AnyOf { - if s := w.WalkSchema(&schema.AnyOf[i]); s != &schema.AnyOf[i] { - if !anyOfCloned { - anyOfCloned = true - clone() - schema.AnyOf = make([]spec.Schema, len(orig.AnyOf)) - copy(schema.AnyOf, orig.AnyOf) - } - schema.AnyOf[i] = *s - } - } - - oneOfCloned := false - for i := range schema.OneOf { - if s := w.WalkSchema(&schema.OneOf[i]); s != &schema.OneOf[i] { - if !oneOfCloned { - oneOfCloned = true - clone() - schema.OneOf = make([]spec.Schema, len(orig.OneOf)) - copy(schema.OneOf, orig.OneOf) - } - schema.OneOf[i] = *s - } - } - - if schema.Not != nil { - if s := w.WalkSchema(schema.Not); s != schema.Not { - clone() - schema.Not = s - } - } - - if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil { - if s := w.WalkSchema(schema.AdditionalProperties.Schema); s != schema.AdditionalProperties.Schema { - clone() - schema.AdditionalProperties = &spec.SchemaOrBool{Schema: s, 
Allows: schema.AdditionalProperties.Allows} - } - } - - if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil { - if s := w.WalkSchema(schema.AdditionalItems.Schema); s != schema.AdditionalItems.Schema { - clone() - schema.AdditionalItems = &spec.SchemaOrBool{Schema: s, Allows: schema.AdditionalItems.Allows} - } - } - - if schema.Items != nil { - if schema.Items.Schema != nil { - if s := w.WalkSchema(schema.Items.Schema); s != schema.Items.Schema { - clone() - schema.Items = &spec.SchemaOrArray{Schema: s} - } - } else { - itemsCloned := false - for i := range schema.Items.Schemas { - if s := w.WalkSchema(&schema.Items.Schemas[i]); s != &schema.Items.Schemas[i] { - if !itemsCloned { - clone() - schema.Items = &spec.SchemaOrArray{ - Schemas: make([]spec.Schema, len(orig.Items.Schemas)), - } - itemsCloned = true - copy(schema.Items.Schemas, orig.Items.Schemas) - } - schema.Items.Schemas[i] = *s - } - } - } - } - - return schema -} - -func (w *Walker) walkParameter(param *spec.Parameter) *spec.Parameter { - if param == nil { - return nil - } - - orig := param - cloned := false - clone := func() { - if !cloned { - cloned = true - param = &spec.Parameter{} - *param = *orig - } - } - - if r := w.RefCallback(&param.Ref); r != &param.Ref { - clone() - param.Ref = *r - } - if s := w.WalkSchema(param.Schema); s != param.Schema { - clone() - param.Schema = s - } - if param.Items != nil { - if r := w.RefCallback(&param.Items.Ref); r != &param.Items.Ref { - param.Items.Ref = *r - } - } - - return param -} - -func (w *Walker) walkParameters(params []spec.Parameter) ([]spec.Parameter, bool) { - if params == nil { - return nil, false - } - - orig := params - cloned := false - clone := func() { - if !cloned { - cloned = true - params = make([]spec.Parameter, len(params)) - copy(params, orig) - } - } - - for i := range params { - if s := w.walkParameter(&params[i]); s != &params[i] { - clone() - params[i] = *s - } - } - - return params, cloned -} - -func (w *Walker) walkResponse(resp *spec.Response) *spec.Response { - if resp == nil { - return nil - } - - orig := resp - cloned := false - clone := func() { - if !cloned { - cloned = true - resp = &spec.Response{} - *resp = *orig - } - } - - if r := w.RefCallback(&resp.Ref); r != &resp.Ref { - clone() - resp.Ref = *r - } - if s := w.WalkSchema(resp.Schema); s != resp.Schema { - clone() - resp.Schema = s - } - - return resp -} - -func (w *Walker) walkResponses(resps *spec.Responses) *spec.Responses { - if resps == nil { - return nil - } - - orig := resps - cloned := false - clone := func() { - if !cloned { - cloned = true - resps = &spec.Responses{} - *resps = *orig - } - } - - if r := w.walkResponse(resps.ResponsesProps.Default); r != resps.ResponsesProps.Default { - clone() - resps.Default = r - } - - responsesCloned := false - for k, v := range resps.ResponsesProps.StatusCodeResponses { - if r := w.walkResponse(&v); r != &v { - if !responsesCloned { - responsesCloned = true - clone() - resps.ResponsesProps.StatusCodeResponses = make(map[int]spec.Response, len(orig.StatusCodeResponses)) - for k2, v2 := range orig.StatusCodeResponses { - resps.ResponsesProps.StatusCodeResponses[k2] = v2 - } - } - resps.ResponsesProps.StatusCodeResponses[k] = *r - } - } - - return resps -} - -func (w *Walker) walkOperation(op *spec.Operation) *spec.Operation { - if op == nil { - return nil - } - - orig := op - cloned := false - clone := func() { - if !cloned { - cloned = true - op = &spec.Operation{} - *op = *orig - } - } - - parametersCloned := false - for i 
:= range op.Parameters { - if s := w.walkParameter(&op.Parameters[i]); s != &op.Parameters[i] { - if !parametersCloned { - parametersCloned = true - clone() - op.Parameters = make([]spec.Parameter, len(orig.Parameters)) - copy(op.Parameters, orig.Parameters) - } - op.Parameters[i] = *s - } - } - - if r := w.walkResponses(op.Responses); r != op.Responses { - clone() - op.Responses = r - } - - return op -} - -func (w *Walker) walkPathItem(pathItem *spec.PathItem) *spec.PathItem { - if pathItem == nil { - return nil - } - - orig := pathItem - cloned := false - clone := func() { - if !cloned { - cloned = true - pathItem = &spec.PathItem{} - *pathItem = *orig - } - } - - if p, changed := w.walkParameters(pathItem.Parameters); changed { - clone() - pathItem.Parameters = p - } - if op := w.walkOperation(pathItem.Get); op != pathItem.Get { - clone() - pathItem.Get = op - } - if op := w.walkOperation(pathItem.Head); op != pathItem.Head { - clone() - pathItem.Head = op - } - if op := w.walkOperation(pathItem.Delete); op != pathItem.Delete { - clone() - pathItem.Delete = op - } - if op := w.walkOperation(pathItem.Options); op != pathItem.Options { - clone() - pathItem.Options = op - } - if op := w.walkOperation(pathItem.Patch); op != pathItem.Patch { - clone() - pathItem.Patch = op - } - if op := w.walkOperation(pathItem.Post); op != pathItem.Post { - clone() - pathItem.Post = op - } - if op := w.walkOperation(pathItem.Put); op != pathItem.Put { - clone() - pathItem.Put = op - } - - return pathItem -} - -func (w *Walker) walkPaths(paths *spec.Paths) *spec.Paths { - if paths == nil { - return nil - } - - orig := paths - cloned := false - clone := func() { - if !cloned { - cloned = true - paths = &spec.Paths{} - *paths = *orig - } - } - - pathsCloned := false - for k, v := range paths.Paths { - if p := w.walkPathItem(&v); p != &v { - if !pathsCloned { - pathsCloned = true - clone() - paths.Paths = make(map[string]spec.PathItem, len(orig.Paths)) - for k2, v2 := range orig.Paths { - paths.Paths[k2] = v2 - } - } - paths.Paths[k] = *p - } - } - - return paths -} - -func (w *Walker) WalkRoot(swagger *spec.Swagger) *spec.Swagger { - if swagger == nil { - return nil - } - - orig := swagger - cloned := false - clone := func() { - if !cloned { - cloned = true - swagger = &spec.Swagger{} - *swagger = *orig - } - } - - parametersCloned := false - for k, v := range swagger.Parameters { - if p := w.walkParameter(&v); p != &v { - if !parametersCloned { - parametersCloned = true - clone() - swagger.Parameters = make(map[string]spec.Parameter, len(orig.Parameters)) - for k2, v2 := range orig.Parameters { - swagger.Parameters[k2] = v2 - } - } - swagger.Parameters[k] = *p - } - } - - responsesCloned := false - for k, v := range swagger.Responses { - if r := w.walkResponse(&v); r != &v { - if !responsesCloned { - responsesCloned = true - clone() - swagger.Responses = make(map[string]spec.Response, len(orig.Responses)) - for k2, v2 := range orig.Responses { - swagger.Responses[k2] = v2 - } - } - swagger.Responses[k] = *r - } - } - - definitionsCloned := false - for k, v := range swagger.Definitions { - if s := w.WalkSchema(&v); s != &v { - if !definitionsCloned { - definitionsCloned = true - clone() - swagger.Definitions = make(spec.Definitions, len(orig.Definitions)) - for k2, v2 := range orig.Definitions { - swagger.Definitions[k2] = v2 - } - } - swagger.Definitions[k] = *s - } - } - - if swagger.Paths != nil { - if p := w.walkPaths(swagger.Paths); p != swagger.Paths { - clone() - swagger.Paths = p - } - } - - return 
swagger -} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go b/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go index 699291f1d8..1f62c6e772 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go @@ -32,6 +32,9 @@ type Encoding struct { // MarshalJSON is a custom marshal function that knows how to encode Encoding as JSON func (e *Encoding) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(e) + } b1, err := json.Marshal(e.EncodingProps) if err != nil { return nil, err @@ -43,6 +46,16 @@ func (e *Encoding) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +func (e *Encoding) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + EncodingProps encodingPropsOmitZero `json:",inline"` + spec.Extensions + } + x.Extensions = internal.SanitizeExtensions(e.Extensions) + x.EncodingProps = encodingPropsOmitZero(e.EncodingProps) + return opts.MarshalNext(enc, x) +} + func (e *Encoding) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, e) @@ -82,3 +95,11 @@ type EncodingProps struct { // AllowReserved determines whether the parameter value SHOULD allow reserved characters, as defined by RFC3986 AllowReserved bool `json:"allowReserved,omitempty"` } + +type encodingPropsOmitZero struct { + ContentType string `json:"contentType,omitempty"` + Headers map[string]*Header `json:"headers,omitempty"` + Style string `json:"style,omitempty"` + Explode bool `json:"explode,omitzero"` + AllowReserved bool `json:"allowReserved,omitzero"` +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/example.go b/vendor/k8s.io/kube-openapi/pkg/spec3/example.go index 03b8727170..8834a92e6d 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/example.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/example.go @@ -36,6 +36,9 @@ type Example struct { // MarshalJSON is a custom marshal function that knows how to encode RequestBody as JSON func (e *Example) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(e) + } b1, err := json.Marshal(e.Refable) if err != nil { return nil, err @@ -50,6 +53,17 @@ func (e *Example) MarshalJSON() ([]byte, error) { } return swag.ConcatJSON(b1, b2, b3), nil } +func (e *Example) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + ExampleProps `json:",inline"` + spec.Extensions + } + x.Ref = e.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(e.Extensions) + x.ExampleProps = e.ExampleProps + return opts.MarshalNext(enc, x) +} func (e *Example) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go b/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go index e79956721a..f0515496e4 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go @@ -39,6 +39,9 @@ type ExternalDocumentationProps struct { // MarshalJSON is a custom marshal function that knows how to encode Responses as JSON func (e *ExternalDocumentation) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(e) + } b1, err := json.Marshal(e.ExternalDocumentationProps) if err != nil { return 
nil, err @@ -50,6 +53,16 @@ func (e *ExternalDocumentation) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +func (e *ExternalDocumentation) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + ExternalDocumentationProps `json:",inline"` + spec.Extensions + } + x.Extensions = internal.SanitizeExtensions(e.Extensions) + x.ExternalDocumentationProps = e.ExternalDocumentationProps + return opts.MarshalNext(enc, x) +} + func (e *ExternalDocumentation) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, e) diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go b/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go index bc19dd48ed..08b6246ceb 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go @@ -35,6 +35,18 @@ var OpenAPIV3FuzzFuncs []interface{} = []interface{}{ func(o *OpenAPI, c fuzz.Continue) { c.FuzzNoCustom(o) o.Version = "3.0.0" + for i, val := range o.SecurityRequirement { + if val == nil { + o.SecurityRequirement[i] = make(map[string][]string) + } + + for k, v := range val { + if v == nil { + val[k] = make([]string, 0) + } + } + } + }, func(r *interface{}, c fuzz.Continue) { switch c.Intn(3) { @@ -169,6 +181,21 @@ var OpenAPIV3FuzzFuncs []interface{} = []interface{}{ c.Fuzz(&v.ResponseProps) c.Fuzz(&v.VendorExtensible) }, + func(v *Operation, c fuzz.Continue) { + c.FuzzNoCustom(v) + // Do not fuzz null values into the array. + for i, val := range v.SecurityRequirement { + if val == nil { + v.SecurityRequirement[i] = make(map[string][]string) + } + + for k, v := range val { + if v == nil { + val[k] = make([]string, 0) + } + } + } + }, func(v *spec.Extensions, c fuzz.Continue) { numChildren := c.Intn(5) for i := 0; i < numChildren; i++ { diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/header.go b/vendor/k8s.io/kube-openapi/pkg/spec3/header.go index ee5a30f797..9ea30628ce 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/header.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/header.go @@ -36,6 +36,9 @@ type Header struct { // MarshalJSON is a custom marshal function that knows how to encode Header as JSON func (h *Header) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(h) + } b1, err := json.Marshal(h.Refable) if err != nil { return nil, err @@ -51,6 +54,18 @@ func (h *Header) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3), nil } +func (h *Header) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + HeaderProps headerPropsOmitZero `json:",inline"` + spec.Extensions + } + x.Ref = h.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(h.Extensions) + x.HeaderProps = headerPropsOmitZero(h.HeaderProps) + return opts.MarshalNext(enc, x) +} + func (h *Header) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, h) @@ -109,3 +124,19 @@ type HeaderProps struct { // Examples of the header Examples map[string]*Example `json:"examples,omitempty"` } + +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). 
+type headerPropsOmitZero struct { + Description string `json:"description,omitempty"` + Required bool `json:"required,omitzero"` + Deprecated bool `json:"deprecated,omitzero"` + AllowEmptyValue bool `json:"allowEmptyValue,omitzero"` + Style string `json:"style,omitempty"` + Explode bool `json:"explode,omitzero"` + AllowReserved bool `json:"allowReserved,omitzero"` + Schema *spec.Schema `json:"schema,omitzero"` + Content map[string]*MediaType `json:"content,omitempty"` + Example interface{} `json:"example,omitempty"` + Examples map[string]*Example `json:"examples,omitempty"` +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go b/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go index d390e69bcf..47eef1edb0 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go @@ -35,6 +35,9 @@ type MediaType struct { // MarshalJSON is a custom marshal function that knows how to encode MediaType as JSON func (m *MediaType) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(m) + } b1, err := json.Marshal(m.MediaTypeProps) if err != nil { return nil, err @@ -46,6 +49,16 @@ func (m *MediaType) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +func (e *MediaType) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + MediaTypeProps mediaTypePropsOmitZero `json:",inline"` + spec.Extensions + } + x.Extensions = internal.SanitizeExtensions(e.Extensions) + x.MediaTypeProps = mediaTypePropsOmitZero(e.MediaTypeProps) + return opts.MarshalNext(enc, x) +} + func (m *MediaType) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, m) @@ -84,3 +97,10 @@ type MediaTypeProps struct { // A map between a property name and its encoding information. The key, being the property name, MUST exist in the schema as a property. 
The encoding object SHALL only apply to requestBody objects when the media type is multipart or application/x-www-form-urlencoded Encoding map[string]*Encoding `json:"encoding,omitempty"` } + +type mediaTypePropsOmitZero struct { + Schema *spec.Schema `json:"schema,omitzero"` + Example interface{} `json:"example,omitempty"` + Examples map[string]*Example `json:"examples,omitempty"` + Encoding map[string]*Encoding `json:"encoding,omitempty"` +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go b/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go index 28230610bd..f1e1025479 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go @@ -35,6 +35,9 @@ type Operation struct { // MarshalJSON is a custom marshal function that knows how to encode Operation as JSON func (o *Operation) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(o) + } b1, err := json.Marshal(o.OperationProps) if err != nil { return nil, err @@ -46,6 +49,16 @@ func (o *Operation) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +func (o *Operation) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + spec.Extensions + OperationProps operationPropsOmitZero `json:",inline"` + } + x.Extensions = internal.SanitizeExtensions(o.Extensions) + x.OperationProps = operationPropsOmitZero(o.OperationProps) + return opts.MarshalNext(enc, x) +} + // UnmarshalJSON hydrates this items instance with the data from JSON func (o *Operation) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { @@ -95,3 +108,17 @@ type OperationProps struct { // Servers contains an alternative server array to service this operation Servers []*Server `json:"servers,omitempty"` } + +type operationPropsOmitZero struct { + Tags []string `json:"tags,omitempty"` + Summary string `json:"summary,omitempty"` + Description string `json:"description,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitzero"` + OperationId string `json:"operationId,omitempty"` + Parameters []*Parameter `json:"parameters,omitempty"` + RequestBody *RequestBody `json:"requestBody,omitzero"` + Responses *Responses `json:"responses,omitzero"` + Deprecated bool `json:"deprecated,omitzero"` + SecurityRequirement []map[string][]string `json:"security,omitempty"` + Servers []*Server `json:"servers,omitempty"` +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/parameter.go b/vendor/k8s.io/kube-openapi/pkg/spec3/parameter.go index 613da71a6d..ada7edb637 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/parameter.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/parameter.go @@ -36,6 +36,9 @@ type Parameter struct { // MarshalJSON is a custom marshal function that knows how to encode Parameter as JSON func (p *Parameter) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(p) + } b1, err := json.Marshal(p.Refable) if err != nil { return nil, err @@ -51,6 +54,18 @@ func (p *Parameter) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3), nil } +func (p *Parameter) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + ParameterProps parameterPropsOmitZero `json:",inline"` + spec.Extensions + } + x.Ref = p.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(p.Extensions) + x.ParameterProps = 
parameterPropsOmitZero(p.ParameterProps) + return opts.MarshalNext(enc, x) +} + func (p *Parameter) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, p) @@ -114,3 +129,19 @@ type ParameterProps struct { // Examples of the parameter's potential value. Each example SHOULD contain a value in the correct format as specified in the parameter encoding Examples map[string]*Example `json:"examples,omitempty"` } + +type parameterPropsOmitZero struct { + Name string `json:"name,omitempty"` + In string `json:"in,omitempty"` + Description string `json:"description,omitempty"` + Required bool `json:"required,omitzero"` + Deprecated bool `json:"deprecated,omitzero"` + AllowEmptyValue bool `json:"allowEmptyValue,omitzero"` + Style string `json:"style,omitempty"` + Explode bool `json:"explode,omitzero"` + AllowReserved bool `json:"allowReserved,omitzero"` + Schema *spec.Schema `json:"schema,omitzero"` + Content map[string]*MediaType `json:"content,omitempty"` + Example interface{} `json:"example,omitempty"` + Examples map[string]*Example `json:"examples,omitempty"` +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/path.go b/vendor/k8s.io/kube-openapi/pkg/spec3/path.go index 40d9061ace..16fbbb4dd9 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/path.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/path.go @@ -35,15 +35,41 @@ type Paths struct { // MarshalJSON is a custom marshal function that knows how to encode Paths as JSON func (p *Paths) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(p.Paths) + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(p) + } + b1, err := json.Marshal(p.VendorExtensible) if err != nil { return nil, err } - b2, err := json.Marshal(p.VendorExtensible) + + pths := make(map[string]*Path) + for k, v := range p.Paths { + if strings.HasPrefix(k, "/") { + pths[k] = v + } + } + b2, err := json.Marshal(pths) if err != nil { return nil, err } - return swag.ConcatJSON(b1, b2), nil + concated := swag.ConcatJSON(b1, b2) + return concated, nil +} + +func (p *Paths) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + m := make(map[string]any, len(p.Extensions)+len(p.Paths)) + for k, v := range p.Extensions { + if internal.IsExtensionKey(k) { + m[k] = v + } + } + for k, v := range p.Paths { + if strings.HasPrefix(k, "/") { + m[k] = v + } + } + return opts.MarshalNext(enc, m) } // UnmarshalJSON hydrates this items instance with the data from JSON @@ -144,6 +170,9 @@ type Path struct { // MarshalJSON is a custom marshal function that knows how to encode Path as JSON func (p *Path) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(p) + } b1, err := json.Marshal(p.Refable) if err != nil { return nil, err @@ -159,6 +188,18 @@ func (p *Path) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3), nil } +func (p *Path) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + spec.Extensions + PathProps + } + x.Ref = p.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(p.Extensions) + x.PathProps = p.PathProps + return opts.MarshalNext(enc, x) +} + func (p *Path) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, p) diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go b/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go index 
33267ce675..6f8607e400 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go @@ -36,6 +36,9 @@ type RequestBody struct { // MarshalJSON is a custom marshal function that knows how to encode RequestBody as JSON func (r *RequestBody) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(r) + } b1, err := json.Marshal(r.Refable) if err != nil { return nil, err @@ -51,6 +54,18 @@ func (r *RequestBody) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3), nil } +func (r *RequestBody) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + RequestBodyProps requestBodyPropsOmitZero `json:",inline"` + spec.Extensions + } + x.Ref = r.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(r.Extensions) + x.RequestBodyProps = requestBodyPropsOmitZero(r.RequestBodyProps) + return opts.MarshalNext(enc, x) +} + func (r *RequestBody) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, r) @@ -77,6 +92,12 @@ type RequestBodyProps struct { Required bool `json:"required,omitempty"` } +type requestBodyPropsOmitZero struct { + Description string `json:"description,omitempty"` + Content map[string]*MediaType `json:"content,omitempty"` + Required bool `json:"required,omitzero"` +} + func (r *RequestBody) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { var x struct { spec.Extensions diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/response.go b/vendor/k8s.io/kube-openapi/pkg/spec3/response.go index 95b388e6c6..73e241fdc9 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/response.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/response.go @@ -37,6 +37,9 @@ type Responses struct { // MarshalJSON is a custom marshal function that knows how to encode Responses as JSON func (r *Responses) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(r) + } b1, err := json.Marshal(r.ResponsesProps) if err != nil { return nil, err @@ -48,6 +51,25 @@ func (r *Responses) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +func (r Responses) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + type ArbitraryKeys map[string]interface{} + var x struct { + ArbitraryKeys + Default *Response `json:"default,omitzero"` + } + x.ArbitraryKeys = make(map[string]any, len(r.Extensions)+len(r.StatusCodeResponses)) + for k, v := range r.Extensions { + if internal.IsExtensionKey(k) { + x.ArbitraryKeys[k] = v + } + } + for k, v := range r.StatusCodeResponses { + x.ArbitraryKeys[strconv.Itoa(k)] = v + } + x.Default = r.Default + return opts.MarshalNext(enc, x) +} + func (r *Responses) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, r) @@ -179,6 +201,9 @@ type Response struct { // MarshalJSON is a custom marshal function that knows how to encode Response as JSON func (r *Response) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(r) + } b1, err := json.Marshal(r.Refable) if err != nil { return nil, err @@ -194,6 +219,18 @@ func (r *Response) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3), nil } +func (r Response) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var 
x struct { + Ref string `json:"$ref,omitempty"` + spec.Extensions + ResponseProps `json:",inline"` + } + x.Ref = r.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(r.Extensions) + x.ResponseProps = r.ResponseProps + return opts.MarshalNext(enc, x) +} + func (r *Response) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, r) @@ -247,6 +284,9 @@ type Link struct { // MarshalJSON is a custom marshal function that knows how to encode Link as JSON func (r *Link) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(r) + } b1, err := json.Marshal(r.Refable) if err != nil { return nil, err @@ -262,6 +302,18 @@ func (r *Link) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3), nil } +func (r *Link) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + spec.Extensions + LinkProps `json:",inline"` + } + x.Ref = r.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(r.Extensions) + x.LinkProps = r.LinkProps + return opts.MarshalNext(enc, x) +} + func (r *Link) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, r) diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go b/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go index edf7e6de3f..dd1e98ed88 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go @@ -20,6 +20,8 @@ import ( "encoding/json" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" "k8s.io/kube-openapi/pkg/validation/spec" ) @@ -32,6 +34,9 @@ type SecurityScheme struct { // MarshalJSON is a custom marshal function that knows how to encode SecurityScheme as JSON func (s *SecurityScheme) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(s) + } b1, err := json.Marshal(s.SecuritySchemeProps) if err != nil { return nil, err @@ -47,6 +52,18 @@ func (s *SecurityScheme) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3), nil } +func (s *SecurityScheme) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + SecuritySchemeProps `json:",inline"` + spec.Extensions + } + x.Ref = s.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(s.Extensions) + x.SecuritySchemeProps = s.SecuritySchemeProps + return opts.MarshalNext(enc, x) +} + // UnmarshalJSON hydrates this items instance with the data from JSON func (s *SecurityScheme) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil { diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/server.go b/vendor/k8s.io/kube-openapi/pkg/spec3/server.go index d5df0a7811..654a42c06e 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/server.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/server.go @@ -41,6 +41,9 @@ type ServerProps struct { // MarshalJSON is a custom marshal function that knows how to encode Responses as JSON func (s *Server) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(s) + } b1, err := json.Marshal(s.ServerProps) if err != nil { return nil, err @@ -52,6 +55,16 @@ func (s *Server) 
MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +func (s *Server) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + ServerProps `json:",inline"` + spec.Extensions + } + x.Extensions = internal.SanitizeExtensions(s.Extensions) + x.ServerProps = s.ServerProps + return opts.MarshalNext(enc, x) +} + func (s *Server) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, s) @@ -96,6 +109,9 @@ type ServerVariableProps struct { // MarshalJSON is a custom marshal function that knows how to encode Responses as JSON func (s *ServerVariable) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(s) + } b1, err := json.Marshal(s.ServerVariableProps) if err != nil { return nil, err @@ -107,6 +123,16 @@ func (s *ServerVariable) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +func (s *ServerVariable) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + ServerVariableProps `json:",inline"` + spec.Extensions + } + x.Extensions = internal.SanitizeExtensions(s.Extensions) + x.ServerVariableProps = s.ServerVariableProps + return opts.MarshalNext(enc, x) +} + func (s *ServerVariable) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, s) diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/spec.go b/vendor/k8s.io/kube-openapi/pkg/spec3/spec.go index bed096fb76..5db819c7f0 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/spec.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/spec.go @@ -36,6 +36,8 @@ type OpenAPI struct { Servers []*Server `json:"servers,omitempty"` // Components hold various schemas for the specification Components *Components `json:"components,omitempty"` + // SecurityRequirement holds a declaration of which security mechanisms can be used across the API + SecurityRequirement []map[string][]string `json:"security,omitempty"` // ExternalDocs holds additional external documentation ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` } @@ -48,3 +50,26 @@ func (o *OpenAPI) UnmarshalJSON(data []byte) error { } return json.Unmarshal(data, &p) } + +func (o *OpenAPI) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(o) + } + type OpenAPIWithNoFunctions OpenAPI + p := (*OpenAPIWithNoFunctions)(o) + return json.Marshal(&p) +} + +func (o *OpenAPI) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + type OpenAPIOmitZero struct { + Version string `json:"openapi"` + Info *spec.Info `json:"info"` + Paths *Paths `json:"paths,omitzero"` + Servers []*Server `json:"servers,omitempty"` + Components *Components `json:"components,omitzero"` + SecurityRequirement []map[string][]string `json:"security,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitzero"` + } + x := (*OpenAPIOmitZero)(o) + return opts.MarshalNext(enc, x) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/fuzz.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/fuzz.go deleted file mode 100644 index c66f998f51..0000000000 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/fuzz.go +++ /dev/null @@ -1,502 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package spec - -import ( - "github.com/go-openapi/jsonreference" - "github.com/google/go-cmp/cmp" - fuzz "github.com/google/gofuzz" -) - -var SwaggerFuzzFuncs []interface{} = []interface{}{ - func(v *Responses, c fuzz.Continue) { - c.FuzzNoCustom(v) - if v.Default != nil { - // Check if we hit maxDepth and left an incomplete value - if v.Default.Description == "" { - v.Default = nil - v.StatusCodeResponses = nil - } - } - - // conversion has no way to discern empty statusCodeResponses from - // nil, since "default" is always included in the map. - // So avoid empty responses list - if len(v.StatusCodeResponses) == 0 { - v.StatusCodeResponses = nil - } - }, - func(v *Operation, c fuzz.Continue) { - c.FuzzNoCustom(v) - - if v != nil { - // force non-nil - v.Responses = &Responses{} - c.Fuzz(v.Responses) - - v.Schemes = nil - if c.RandBool() { - v.Schemes = append(v.Schemes, "http") - } - - if c.RandBool() { - v.Schemes = append(v.Schemes, "https") - } - - if c.RandBool() { - v.Schemes = append(v.Schemes, "ws") - } - - if c.RandBool() { - v.Schemes = append(v.Schemes, "wss") - } - - // Gnostic unconditionally makes security values non-null - // So do not fuzz null values into the array. - for i, val := range v.Security { - if val == nil { - v.Security[i] = make(map[string][]string) - } - - for k, v := range val { - if v == nil { - val[k] = make([]string, 0) - } - } - } - } - }, - func(v map[int]Response, c fuzz.Continue) { - n := 0 - c.Fuzz(&n) - if n == 0 { - // Test that fuzzer is not at maxDepth so we do not - // end up with empty elements - return - } - - // Prevent negative numbers - num := c.Intn(4) - for i := 0; i < num+2; i++ { - val := Response{} - c.Fuzz(&val) - - val.Description = c.RandString() + "x" - v[100*(i+1)+c.Intn(100)] = val - } - }, - func(v map[string]PathItem, c fuzz.Continue) { - n := 0 - c.Fuzz(&n) - if n == 0 { - // Test that fuzzer is not at maxDepth so we do not - // end up with empty elements - return - } - - num := c.Intn(5) - for i := 0; i < num+2; i++ { - val := PathItem{} - c.Fuzz(&val) - - // Ref params are only allowed in certain locations, so - // possibly add a few to PathItems - numRefsToAdd := c.Intn(5) - for i := 0; i < numRefsToAdd; i++ { - theRef := Parameter{} - c.Fuzz(&theRef.Refable) - - val.Parameters = append(val.Parameters, theRef) - } - - v["/"+c.RandString()] = val - } - }, - func(v *SchemaOrArray, c fuzz.Continue) { - *v = SchemaOrArray{} - // gnostic parser just doesn't support more - // than one Schema here - v.Schema = &Schema{} - c.Fuzz(&v.Schema) - - }, - func(v *SchemaOrBool, c fuzz.Continue) { - *v = SchemaOrBool{} - - if c.RandBool() { - v.Allows = c.RandBool() - } else { - v.Schema = &Schema{} - v.Allows = true - c.Fuzz(&v.Schema) - } - }, - func(v map[string]Response, c fuzz.Continue) { - n := 0 - c.Fuzz(&n) - if n == 0 { - // Test that fuzzer is not at maxDepth so we do not - // end up with empty elements - return - } - - // Response definitions are not allowed to - // be refs - for i := 0; i < c.Intn(5)+1; i++ { - resp := &Response{} - - c.Fuzz(resp) - resp.Ref = Ref{} - resp.Description = c.RandString() + "x" - - // Response refs 
are not vendor extensible by gnostic - resp.VendorExtensible.Extensions = nil - v[c.RandString()+"x"] = *resp - } - }, - func(v *Header, c fuzz.Continue) { - if v != nil { - c.FuzzNoCustom(v) - - // descendant Items of Header may not be refs - cur := v.Items - for cur != nil { - cur.Ref = Ref{} - cur = cur.Items - } - } - }, - func(v *Ref, c fuzz.Continue) { - *v = Ref{} - v.Ref, _ = jsonreference.New("http://asd.com/" + c.RandString()) - }, - func(v *Response, c fuzz.Continue) { - *v = Response{} - if c.RandBool() { - v.Ref = Ref{} - v.Ref.Ref, _ = jsonreference.New("http://asd.com/" + c.RandString()) - } else { - c.Fuzz(&v.VendorExtensible) - c.Fuzz(&v.Schema) - c.Fuzz(&v.ResponseProps) - - v.Headers = nil - v.Ref = Ref{} - - n := 0 - c.Fuzz(&n) - if n != 0 { - // Test that fuzzer is not at maxDepth so we do not - // end up with empty elements - num := c.Intn(4) - for i := 0; i < num; i++ { - if v.Headers == nil { - v.Headers = make(map[string]Header) - } - hdr := Header{} - c.Fuzz(&hdr) - if hdr.Type == "" { - // hit maxDepth, just abort trying to make haders - v.Headers = nil - break - } - v.Headers[c.RandString()+"x"] = hdr - } - } else { - v.Headers = nil - } - } - - v.Description = c.RandString() + "x" - - // Gnostic parses empty as nil, so to keep avoid putting empty - if len(v.Headers) == 0 { - v.Headers = nil - } - }, - func(v **Info, c fuzz.Continue) { - // Info is never nil - *v = &Info{} - c.FuzzNoCustom(*v) - - (*v).Title = c.RandString() + "x" - }, - func(v *Extensions, c fuzz.Continue) { - // gnostic parser only picks up x- vendor extensions - numChildren := c.Intn(5) - for i := 0; i < numChildren; i++ { - if *v == nil { - *v = Extensions{} - } - (*v)["x-"+c.RandString()] = c.RandString() - } - }, - func(v *Swagger, c fuzz.Continue) { - c.FuzzNoCustom(v) - - if v.Paths == nil { - // Force paths non-nil since it does not have omitempty in json tag. - // This means a perfect roundtrip (via json) is impossible, - // since we can't tell the difference between empty/unspecified paths - v.Paths = &Paths{} - c.Fuzz(v.Paths) - } - - v.Swagger = "2.0" - - // Gnostic support serializing ID at all - // unavoidable data loss - v.ID = "" - - v.Schemes = nil - if c.RandUint64()%2 == 1 { - v.Schemes = append(v.Schemes, "http") - } - - if c.RandUint64()%2 == 1 { - v.Schemes = append(v.Schemes, "https") - } - - if c.RandUint64()%2 == 1 { - v.Schemes = append(v.Schemes, "ws") - } - - if c.RandUint64()%2 == 1 { - v.Schemes = append(v.Schemes, "wss") - } - - // Gnostic unconditionally makes security values non-null - // So do not fuzz null values into the array. 
- for i, val := range v.Security { - if val == nil { - v.Security[i] = make(map[string][]string) - } - - for k, v := range val { - if v == nil { - val[k] = make([]string, 0) - } - } - } - }, - func(v *SecurityScheme, c fuzz.Continue) { - v.Description = c.RandString() + "x" - c.Fuzz(&v.VendorExtensible) - - switch c.Intn(3) { - case 0: - v.Type = "basic" - case 1: - v.Type = "apiKey" - switch c.Intn(2) { - case 0: - v.In = "header" - case 1: - v.In = "query" - default: - panic("unreachable") - } - v.Name = "x" + c.RandString() - case 2: - v.Type = "oauth2" - - switch c.Intn(4) { - case 0: - v.Flow = "accessCode" - v.TokenURL = "https://" + c.RandString() - v.AuthorizationURL = "https://" + c.RandString() - case 1: - v.Flow = "application" - v.TokenURL = "https://" + c.RandString() - case 2: - v.Flow = "implicit" - v.AuthorizationURL = "https://" + c.RandString() - case 3: - v.Flow = "password" - v.TokenURL = "https://" + c.RandString() - default: - panic("unreachable") - } - c.Fuzz(&v.Scopes) - default: - panic("unreachable") - } - }, - func(v *interface{}, c fuzz.Continue) { - *v = c.RandString() + "x" - }, - func(v *string, c fuzz.Continue) { - *v = c.RandString() + "x" - }, - func(v *ExternalDocumentation, c fuzz.Continue) { - v.Description = c.RandString() + "x" - v.URL = c.RandString() + "x" - }, - func(v *SimpleSchema, c fuzz.Continue) { - c.FuzzNoCustom(v) - - switch c.Intn(5) { - case 0: - v.Type = "string" - case 1: - v.Type = "number" - case 2: - v.Type = "boolean" - case 3: - v.Type = "integer" - case 4: - v.Type = "array" - default: - panic("unreachable") - } - - switch c.Intn(5) { - case 0: - v.CollectionFormat = "csv" - case 1: - v.CollectionFormat = "ssv" - case 2: - v.CollectionFormat = "tsv" - case 3: - v.CollectionFormat = "pipes" - case 4: - v.CollectionFormat = "" - default: - panic("unreachable") - } - - // None of the types which include SimpleSchema in our definitions - // actually support "example" in the official spec - v.Example = nil - - // unsupported by openapi - v.Nullable = false - }, - func(v *int64, c fuzz.Continue) { - c.Fuzz(v) - - // Gnostic does not differentiate between 0 and non-specified - // so avoid using 0 for fuzzer - if *v == 0 { - *v = 1 - } - }, - func(v *float64, c fuzz.Continue) { - c.Fuzz(v) - - // Gnostic does not differentiate between 0 and non-specified - // so avoid using 0 for fuzzer - if *v == 0.0 { - *v = 1.0 - } - }, - func(v *Parameter, c fuzz.Continue) { - if v == nil { - return - } - c.Fuzz(&v.VendorExtensible) - if c.RandBool() { - // body param - v.Description = c.RandString() + "x" - v.Name = c.RandString() + "x" - v.In = "body" - c.Fuzz(&v.Description) - c.Fuzz(&v.Required) - - v.Schema = &Schema{} - c.Fuzz(&v.Schema) - - } else { - c.Fuzz(&v.SimpleSchema) - c.Fuzz(&v.CommonValidations) - v.AllowEmptyValue = false - v.Description = c.RandString() + "x" - v.Name = c.RandString() + "x" - - switch c.Intn(4) { - case 0: - // Header param - v.In = "header" - case 1: - // Form data param - v.In = "formData" - v.AllowEmptyValue = c.RandBool() - case 2: - // Query param - v.In = "query" - v.AllowEmptyValue = c.RandBool() - case 3: - // Path param - v.In = "path" - v.Required = true - default: - panic("unreachable") - } - - // descendant Items of Parameter may not be refs - cur := v.Items - for cur != nil { - cur.Ref = Ref{} - cur = cur.Items - } - } - }, - func(v *Schema, c fuzz.Continue) { - if c.RandBool() { - // file schema - c.Fuzz(&v.Default) - c.Fuzz(&v.Description) - c.Fuzz(&v.Example) - c.Fuzz(&v.ExternalDocs) - - 
c.Fuzz(&v.Format) - c.Fuzz(&v.ReadOnly) - c.Fuzz(&v.Required) - c.Fuzz(&v.Title) - v.Type = StringOrArray{"file"} - - } else { - // normal schema - c.Fuzz(&v.SchemaProps) - c.Fuzz(&v.SwaggerSchemaProps) - c.Fuzz(&v.VendorExtensible) - // c.Fuzz(&v.ExtraProps) - // ExtraProps will not roundtrip - gnostic throws out - // unrecognized keys - } - - // Not supported by official openapi v2 spec - // and stripped by k8s apiserver - v.ID = "" - v.AnyOf = nil - v.OneOf = nil - v.Not = nil - v.Nullable = false - v.AdditionalItems = nil - v.Schema = "" - v.PatternProperties = nil - v.Definitions = nil - v.Dependencies = nil - }, -} - -var SwaggerDiffOptions = []cmp.Option{ - // cmp.Diff panics on Ref since jsonreference.Ref uses unexported fields - cmp.Comparer(func(a Ref, b Ref) bool { - return a.String() == b.String() - }), -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 68bd8fd862..7d02289fe0 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -55,7 +55,7 @@ github.com/chai2010/gettext-go/po # github.com/cilium/charts v0.0.0-20231113210111-5a4126644d7d ## explicit; go 1.17 github.com/cilium/charts -# github.com/cilium/cilium v1.15.0-pre.2 +# github.com/cilium/cilium v1.15.0-pre.3 ## explicit; go 1.21.0 github.com/cilium/cilium/api/v1/client github.com/cilium/cilium/api/v1/client/bgp @@ -129,6 +129,7 @@ github.com/cilium/cilium/pkg/logging/logfields github.com/cilium/cilium/pkg/mac github.com/cilium/cilium/pkg/metrics github.com/cilium/cilium/pkg/metrics/metric +github.com/cilium/cilium/pkg/metrics/metric/collections github.com/cilium/cilium/pkg/monitor/api github.com/cilium/cilium/pkg/monitor/notifications github.com/cilium/cilium/pkg/node/addressing @@ -137,12 +138,13 @@ github.com/cilium/cilium/pkg/policy/api github.com/cilium/cilium/pkg/promise github.com/cilium/cilium/pkg/safetime github.com/cilium/cilium/pkg/slices +github.com/cilium/cilium/pkg/source github.com/cilium/cilium/pkg/spanstat github.com/cilium/cilium/pkg/stream github.com/cilium/cilium/pkg/time github.com/cilium/cilium/pkg/version github.com/cilium/cilium/pkg/versioncheck -# github.com/cilium/ebpf v0.12.2 +# github.com/cilium/ebpf v0.12.3 ## explicit; go 1.20 github.com/cilium/ebpf github.com/cilium/ebpf/asm @@ -155,11 +157,10 @@ github.com/cilium/ebpf/internal/sysenc github.com/cilium/ebpf/internal/tracefs github.com/cilium/ebpf/internal/unix github.com/cilium/ebpf/link -github.com/cilium/ebpf/rlimit # github.com/cilium/hubble v0.12.2 ## explicit; go 1.20 github.com/cilium/hubble/pkg/printer -# github.com/cilium/proxy v0.0.0-20231018073547-ab187719b71b +# github.com/cilium/proxy v0.0.0-20231031145409-f19708f3d018 ## explicit; go 1.20 github.com/cilium/proxy/go/envoy/admin/v3 github.com/cilium/proxy/go/envoy/annotations @@ -535,7 +536,7 @@ github.com/dsnet/compress/bzip2/internal/sais github.com/dsnet/compress/internal github.com/dsnet/compress/internal/errors github.com/dsnet/compress/internal/prefix -# github.com/emicklei/go-restful/v3 v3.10.2 +# github.com/emicklei/go-restful/v3 v3.11.0 ## explicit; go 1.13 github.com/emicklei/go-restful/v3 github.com/emicklei/go-restful/v3/log @@ -560,10 +561,11 @@ github.com/go-errors/errors # github.com/go-gorp/gorp/v3 v3.1.0 ## explicit; go 1.18 github.com/go-gorp/gorp/v3 -# github.com/go-logr/logr v1.2.4 -## explicit; go 1.16 +# github.com/go-logr/logr v1.3.0 +## explicit; go 1.18 github.com/go-logr/logr github.com/go-logr/logr/funcr +github.com/go-logr/logr/slogr # github.com/go-logr/stdr v1.2.2 ## explicit; go 1.16 github.com/go-logr/stdr @@ -583,8 +585,8 @@ 
github.com/go-openapi/analysis/internal/flatten/sortref # github.com/go-openapi/errors v0.20.4 ## explicit; go 1.14 github.com/go-openapi/errors -# github.com/go-openapi/jsonpointer v0.19.6 -## explicit; go 1.13 +# github.com/go-openapi/jsonpointer v0.20.0 +## explicit; go 1.18 github.com/go-openapi/jsonpointer # github.com/go-openapi/jsonreference v0.20.2 ## explicit; go 1.13 @@ -604,17 +606,17 @@ github.com/go-openapi/runtime/middleware/header github.com/go-openapi/runtime/middleware/untyped github.com/go-openapi/runtime/security github.com/go-openapi/runtime/yamlpc -# github.com/go-openapi/spec v0.20.9 +# github.com/go-openapi/spec v0.20.11 ## explicit; go 1.13 github.com/go-openapi/spec -# github.com/go-openapi/strfmt v0.21.7 +# github.com/go-openapi/strfmt v0.21.8 ## explicit; go 1.19 github.com/go-openapi/strfmt # github.com/go-openapi/swag v0.22.4 ## explicit; go 1.18 github.com/go-openapi/swag -# github.com/go-openapi/validate v0.22.1 -## explicit; go 1.14 +# github.com/go-openapi/validate v0.22.3 +## explicit; go 1.18 github.com/go-openapi/validate # github.com/gobwas/glob v0.2.3 ## explicit @@ -677,6 +679,10 @@ github.com/google/go-cmp/cmp/internal/value ## explicit; go 1.12 github.com/google/gofuzz github.com/google/gofuzz/bytesource +# github.com/google/gopacket v1.1.19 +## explicit; go 1.12 +github.com/google/gopacket +github.com/google/gopacket/layers # github.com/google/gops v0.3.28 ## explicit; go 1.17 github.com/google/gops/agent @@ -688,9 +694,12 @@ github.com/google/shlex # github.com/google/uuid v1.4.0 ## explicit github.com/google/uuid -# github.com/gorilla/mux v1.8.0 -## explicit; go 1.12 +# github.com/gorilla/mux v1.8.1 +## explicit; go 1.20 github.com/gorilla/mux +# github.com/gorilla/websocket v1.5.0 +## explicit; go 1.12 +github.com/gorilla/websocket # github.com/gosuri/uitable v0.0.4 ## explicit github.com/gosuri/uitable @@ -724,7 +733,7 @@ github.com/hashicorp/hcl/json/token # github.com/huandu/xstrings v1.4.0 ## explicit; go 1.12 github.com/huandu/xstrings -# github.com/imdario/mergo v0.3.13 +# github.com/imdario/mergo v0.3.16 ## explicit; go 1.13 github.com/imdario/mergo # github.com/inconshreveable/mousetrap v1.1.0 @@ -840,6 +849,9 @@ github.com/morikuni/aec # github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 ## explicit github.com/munnerz/goautoneg +# github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f +## explicit +github.com/mxk/go-flowrate/flowrate # github.com/nwaples/rardecode v1.1.0 ## explicit github.com/nwaples/rardecode @@ -954,8 +966,8 @@ github.com/sourcegraph/conc/panics github.com/spf13/afero github.com/spf13/afero/internal/common github.com/spf13/afero/mem -# github.com/spf13/cast v1.5.1 -## explicit; go 1.18 +# github.com/spf13/cast v1.6.0 +## explicit; go 1.19 github.com/spf13/cast # github.com/spf13/cobra v1.8.0 ## explicit; go 1.15 @@ -993,7 +1005,7 @@ github.com/ulikunitz/xz github.com/ulikunitz/xz/internal/hash github.com/ulikunitz/xz/internal/xlog github.com/ulikunitz/xz/lzma -# github.com/vishvananda/netlink v1.2.1-beta.2.0.20231024175852-77df5d35f725 +# github.com/vishvananda/netlink v1.2.1-beta.2.0.20231127184239-0ced8385386a ## explicit; go 1.12 github.com/vishvananda/netlink github.com/vishvananda/netlink/nl @@ -1042,7 +1054,7 @@ github.com/zmap/zlint/v3/lints/etsi github.com/zmap/zlint/v3/lints/mozilla github.com/zmap/zlint/v3/lints/rfc github.com/zmap/zlint/v3/util -# go.mongodb.org/mongo-driver v1.11.9 +# go.mongodb.org/mongo-driver v1.12.0 ## explicit; go 1.13 go.mongodb.org/mongo-driver/bson 
go.mongodb.org/mongo-driver/bson/bsoncodec @@ -1051,7 +1063,7 @@ go.mongodb.org/mongo-driver/bson/bsonrw go.mongodb.org/mongo-driver/bson/bsontype go.mongodb.org/mongo-driver/bson/primitive go.mongodb.org/mongo-driver/x/bsonx/bsoncore -# go.opentelemetry.io/otel v1.19.0 +# go.opentelemetry.io/otel v1.21.0 ## explicit; go 1.20 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute @@ -1067,13 +1079,14 @@ go.opentelemetry.io/otel/semconv/internal/v2 go.opentelemetry.io/otel/semconv/v1.12.0 go.opentelemetry.io/otel/semconv/v1.17.0 go.opentelemetry.io/otel/semconv/v1.17.0/httpconv -# go.opentelemetry.io/otel/metric v1.19.0 +# go.opentelemetry.io/otel/metric v1.21.0 ## explicit; go 1.20 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded -# go.opentelemetry.io/otel/trace v1.19.0 +# go.opentelemetry.io/otel/trace v1.21.0 ## explicit; go 1.20 go.opentelemetry.io/otel/trace +go.opentelemetry.io/otel/trace/embedded # go.opentelemetry.io/proto/otlp v1.0.0 ## explicit; go 1.17 go.opentelemetry.io/proto/otlp/common/v1 @@ -1095,11 +1108,11 @@ go.uber.org/dig/internal/graph # go.uber.org/multierr v1.11.0 ## explicit; go 1.19 go.uber.org/multierr -# go4.org/netipx v0.0.0-20230824141953-6213f710f925 +# go4.org/netipx v0.0.0-20231129151722-fdeea329fbba ## explicit; go 1.18 go4.org/netipx -# golang.org/x/crypto v0.14.0 -## explicit; go 1.17 +# golang.org/x/crypto v0.16.0 +## explicit; go 1.18 golang.org/x/crypto/bcrypt golang.org/x/crypto/blowfish golang.org/x/crypto/cast5 @@ -1118,7 +1131,7 @@ golang.org/x/crypto/pbkdf2 golang.org/x/crypto/pkcs12 golang.org/x/crypto/pkcs12/internal/rc2 golang.org/x/crypto/scrypt -# golang.org/x/exp v0.0.0-20231006140011-7918f672742d +# golang.org/x/exp v0.0.0-20231127185646-65229373498e ## explicit; go 1.20 golang.org/x/exp/constraints golang.org/x/exp/maps @@ -1129,10 +1142,11 @@ golang.org/x/exp/slog/internal/buffer # golang.org/x/mod v0.14.0 ## explicit; go 1.18 golang.org/x/mod/semver -# golang.org/x/net v0.17.0 -## explicit; go 1.17 -golang.org/x/net/context +# golang.org/x/net v0.19.0 +## explicit; go 1.18 golang.org/x/net/context/ctxhttp +golang.org/x/net/html +golang.org/x/net/html/atom golang.org/x/net/http/httpguts golang.org/x/net/http2 golang.org/x/net/http2/hpack @@ -1141,7 +1155,7 @@ golang.org/x/net/internal/socks golang.org/x/net/internal/timeseries golang.org/x/net/proxy golang.org/x/net/trace -# golang.org/x/oauth2 v0.12.0 +# golang.org/x/oauth2 v0.13.0 ## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/internal @@ -1149,17 +1163,17 @@ golang.org/x/oauth2/internal ## explicit; go 1.18 golang.org/x/sync/errgroup golang.org/x/sync/semaphore -# golang.org/x/sys v0.13.0 -## explicit; go 1.17 +# golang.org/x/sys v0.15.0 +## explicit; go 1.18 golang.org/x/sys/execabs golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/term v0.13.0 -## explicit; go 1.17 +# golang.org/x/term v0.15.0 +## explicit; go 1.18 golang.org/x/term -# golang.org/x/text v0.13.0 -## explicit; go 1.17 +# golang.org/x/text v0.14.0 +## explicit; go 1.18 golang.org/x/text/encoding golang.org/x/text/encoding/internal golang.org/x/text/encoding/internal/identifier @@ -1170,10 +1184,10 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/time v0.3.0 -## explicit +# golang.org/x/time v0.5.0 +## explicit; go 1.18 golang.org/x/time/rate -# google.golang.org/appengine v1.6.7 +# google.golang.org/appengine v1.6.8 ## explicit; go 1.11 
google.golang.org/appengine/internal google.golang.org/appengine/internal/base @@ -1182,15 +1196,15 @@ google.golang.org/appengine/internal/log google.golang.org/appengine/internal/remote_api google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/urlfetch -# google.golang.org/genproto v0.0.0-20231012201019-e917dd12ba7a +# google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f ## explicit; go 1.19 google.golang.org/genproto/internal -# google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 +# google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 ## explicit; go 1.19 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/api/expr/v1alpha1 -# google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b +# google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 ## explicit; go 1.19 google.golang.org/genproto/googleapis/rpc/status # google.golang.org/grpc v1.59.0 @@ -1338,8 +1352,8 @@ helm.sh/helm/v3/pkg/storage/driver helm.sh/helm/v3/pkg/strvals helm.sh/helm/v3/pkg/time helm.sh/helm/v3/pkg/uploader -# k8s.io/api v0.28.4 -## explicit; go 1.20 +# k8s.io/api v0.29.0-rc.1 +## explicit; go 1.21 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 k8s.io/api/admissionregistration/v1 @@ -1372,7 +1386,7 @@ k8s.io/api/discovery/v1beta1 k8s.io/api/events/v1 k8s.io/api/events/v1beta1 k8s.io/api/extensions/v1beta1 -k8s.io/api/flowcontrol/v1alpha1 +k8s.io/api/flowcontrol/v1 k8s.io/api/flowcontrol/v1beta1 k8s.io/api/flowcontrol/v1beta2 k8s.io/api/flowcontrol/v1beta3 @@ -1395,8 +1409,8 @@ k8s.io/api/scheduling/v1beta1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apiextensions-apiserver v0.28.4 -## explicit; go 1.20 +# k8s.io/apiextensions-apiserver v0.29.0-rc.1 +## explicit; go 1.21 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1 @@ -1406,8 +1420,8 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 -# k8s.io/apimachinery v0.28.4 -## explicit; go 1.20 +# k8s.io/apimachinery v0.29.0-rc.1 +## explicit; go 1.21 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/api/meta @@ -1449,6 +1463,7 @@ k8s.io/apimachinery/pkg/util/managedfields/internal k8s.io/apimachinery/pkg/util/mergepatch k8s.io/apimachinery/pkg/util/naming k8s.io/apimachinery/pkg/util/net +k8s.io/apimachinery/pkg/util/proxy k8s.io/apimachinery/pkg/util/remotecommand k8s.io/apimachinery/pkg/util/runtime k8s.io/apimachinery/pkg/util/sets @@ -1463,17 +1478,17 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.28.4 -## explicit; go 1.20 +# k8s.io/apiserver v0.29.0-rc.1 +## explicit; go 1.21 k8s.io/apiserver/pkg/endpoints/deprecation -# k8s.io/cli-runtime v0.28.2 -## explicit; go 1.20 +# k8s.io/cli-runtime v0.29.0-rc.1 +## explicit; go 1.21 k8s.io/cli-runtime/pkg/genericclioptions k8s.io/cli-runtime/pkg/genericiooptions k8s.io/cli-runtime/pkg/printers 
k8s.io/cli-runtime/pkg/resource -# k8s.io/client-go v0.28.4 -## explicit; go 1.20 +# k8s.io/client-go v0.29.0-rc.1 +## explicit; go 1.21 k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1 k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1 @@ -1498,7 +1513,7 @@ k8s.io/client-go/applyconfigurations/discovery/v1beta1 k8s.io/client-go/applyconfigurations/events/v1 k8s.io/client-go/applyconfigurations/events/v1beta1 k8s.io/client-go/applyconfigurations/extensions/v1beta1 -k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1 +k8s.io/client-go/applyconfigurations/flowcontrol/v1 k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1 k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2 k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3 @@ -1588,8 +1603,8 @@ k8s.io/client-go/kubernetes/typed/events/v1beta1 k8s.io/client-go/kubernetes/typed/events/v1beta1/fake k8s.io/client-go/kubernetes/typed/extensions/v1beta1 k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake -k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1 -k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/fake +k8s.io/client-go/kubernetes/typed/flowcontrol/v1 +k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1 k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2 @@ -1674,6 +1689,7 @@ k8s.io/client-go/tools/remotecommand k8s.io/client-go/tools/watch k8s.io/client-go/transport k8s.io/client-go/transport/spdy +k8s.io/client-go/transport/websocket k8s.io/client-go/util/cert k8s.io/client-go/util/connrotation k8s.io/client-go/util/exec @@ -1682,10 +1698,10 @@ k8s.io/client-go/util/homedir k8s.io/client-go/util/jsonpath k8s.io/client-go/util/keyutil k8s.io/client-go/util/workqueue -# k8s.io/component-base v0.28.4 -## explicit; go 1.20 +# k8s.io/component-base v0.29.0-rc.1 +## explicit; go 1.21 k8s.io/component-base/version -# k8s.io/klog/v2 v2.100.1 +# k8s.io/klog/v2 v2.110.1 ## explicit; go 1.13 k8s.io/klog/v2 k8s.io/klog/v2/internal/buffer @@ -1693,17 +1709,15 @@ k8s.io/klog/v2/internal/clock k8s.io/klog/v2/internal/dbg k8s.io/klog/v2/internal/serialize k8s.io/klog/v2/internal/severity -# k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 +k8s.io/klog/v2/internal/sloghandler +# k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 ## explicit; go 1.19 -k8s.io/kube-openapi/pkg/builder3/util k8s.io/kube-openapi/pkg/cached k8s.io/kube-openapi/pkg/common k8s.io/kube-openapi/pkg/handler3 k8s.io/kube-openapi/pkg/internal k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json -k8s.io/kube-openapi/pkg/openapiconv k8s.io/kube-openapi/pkg/schemaconv -k8s.io/kube-openapi/pkg/schemamutation k8s.io/kube-openapi/pkg/spec3 k8s.io/kube-openapi/pkg/util/proto k8s.io/kube-openapi/pkg/util/proto/validation @@ -1832,15 +1846,16 @@ sigs.k8s.io/kustomize/kyaml/yaml/merge2 sigs.k8s.io/kustomize/kyaml/yaml/merge3 sigs.k8s.io/kustomize/kyaml/yaml/schema sigs.k8s.io/kustomize/kyaml/yaml/walk -# sigs.k8s.io/structured-merge-diff/v4 v4.2.3 +# sigs.k8s.io/structured-merge-diff/v4 v4.4.1 ## explicit; go 1.13 sigs.k8s.io/structured-merge-diff/v4/fieldpath sigs.k8s.io/structured-merge-diff/v4/merge sigs.k8s.io/structured-merge-diff/v4/schema sigs.k8s.io/structured-merge-diff/v4/typed sigs.k8s.io/structured-merge-diff/v4/value -# sigs.k8s.io/yaml v1.3.0 +# sigs.k8s.io/yaml v1.4.0 ## explicit; go 1.12 sigs.k8s.io/yaml 
+sigs.k8s.io/yaml/goyaml.v2 # go.universe.tf/metallb => github.com/cilium/metallb v0.1.1-0.20220829170633-5d7dfb1129f7 # sigs.k8s.io/controller-tools => github.com/cilium/controller-tools v0.8.0-1 diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/pathelementmap.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/pathelementmap.go index 9b14ca581b..41fc2474a4 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/pathelementmap.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/pathelementmap.go @@ -28,20 +28,15 @@ import ( // for PathElementSet and SetNodeMap, so we could probably share the // code. type PathElementValueMap struct { - members sortedPathElementValues + valueMap PathElementMap } func MakePathElementValueMap(size int) PathElementValueMap { return PathElementValueMap{ - members: make(sortedPathElementValues, 0, size), + valueMap: MakePathElementMap(size), } } -type pathElementValue struct { - PathElement PathElement - Value value.Value -} - type sortedPathElementValues []pathElementValue // Implement the sort interface; this would permit bulk creation, which would @@ -53,7 +48,40 @@ func (spev sortedPathElementValues) Less(i, j int) bool { func (spev sortedPathElementValues) Swap(i, j int) { spev[i], spev[j] = spev[j], spev[i] } // Insert adds the pathelement and associated value in the map. +// If insert is called twice with the same PathElement, the value is replaced. func (s *PathElementValueMap) Insert(pe PathElement, v value.Value) { + s.valueMap.Insert(pe, v) +} + +// Get retrieves the value associated with the given PathElement from the map. +// (nil, false) is returned if there is no such PathElement. +func (s *PathElementValueMap) Get(pe PathElement) (value.Value, bool) { + v, ok := s.valueMap.Get(pe) + if !ok { + return nil, false + } + return v.(value.Value), true +} + +// PathElementValueMap is a map from PathElement to interface{}. +type PathElementMap struct { + members sortedPathElementValues +} + +type pathElementValue struct { + PathElement PathElement + Value interface{} +} + +func MakePathElementMap(size int) PathElementMap { + return PathElementMap{ + members: make(sortedPathElementValues, 0, size), + } +} + +// Insert adds the pathelement and associated value in the map. +// If insert is called twice with the same PathElement, the value is replaced. +func (s *PathElementMap) Insert(pe PathElement, v interface{}) { loc := sort.Search(len(s.members), func(i int) bool { return !s.members[i].PathElement.Less(pe) }) @@ -62,6 +90,7 @@ func (s *PathElementValueMap) Insert(pe PathElement, v value.Value) { return } if s.members[loc].PathElement.Equals(pe) { + s.members[loc].Value = v return } s.members = append(s.members, pathElementValue{}) @@ -71,7 +100,7 @@ func (s *PathElementValueMap) Insert(pe PathElement, v value.Value) { // Get retrieves the value associated with the given PathElement from the map. // (nil, false) is returned if there is no such PathElement. 
-func (s *PathElementValueMap) Get(pe PathElement) (value.Value, bool) { +func (s *PathElementMap) Get(pe PathElement) (interface{}, bool) { loc := sort.Search(len(s.members), func(i int) bool { return !s.members[i].PathElement.Less(pe) }) diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go index 75a492d8ea..f1aa258609 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go @@ -112,7 +112,7 @@ func ConflictsFromManagers(sets fieldpath.ManagedFields) Conflicts { set.Set().Iterate(func(p fieldpath.Path) { conflicts = append(conflicts, Conflict{ Manager: manager, - Path: p, + Path: p.Copy(), }) }) } diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go index 1b23dcbd5e..d5a977d607 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go @@ -18,6 +18,7 @@ import ( "sigs.k8s.io/structured-merge-diff/v4/fieldpath" "sigs.k8s.io/structured-merge-diff/v4/typed" + "sigs.k8s.io/structured-merge-diff/v4/value" ) // Converter is an interface to the conversion logic. The converter @@ -27,19 +28,39 @@ type Converter interface { IsMissingVersionError(error) bool } -// Updater is the object used to compute updated FieldSets and also -// merge the object on Apply. -type Updater struct { +// UpdateBuilder allows you to create a new Updater by exposing all of +// the options and setting them once. +type UpdaterBuilder struct { Converter Converter IgnoredFields map[fieldpath.APIVersion]*fieldpath.Set - enableUnions bool + // Stop comparing the new object with old object after applying. + // This was initially used to avoid spurious etcd update, but + // since that's vastly inefficient, we've come-up with a better + // way of doing that. Create this flag to stop it. + // Comparing has become more expensive too now that we're not using + // `Compare` but `value.Equals` so this gives an option to avoid it. + ReturnInputOnNoop bool } -// EnableUnionFeature turns on union handling. It is disabled by default until the -// feature is complete. -func (s *Updater) EnableUnionFeature() { - s.enableUnions = true +func (u *UpdaterBuilder) BuildUpdater() *Updater { + return &Updater{ + Converter: u.Converter, + IgnoredFields: u.IgnoredFields, + returnInputOnNoop: u.ReturnInputOnNoop, + } +} + +// Updater is the object used to compute updated FieldSets and also +// merge the object on Apply. +type Updater struct { + // Deprecated: This will eventually become private. + Converter Converter + + // Deprecated: This will eventually become private. 
+ IgnoredFields map[fieldpath.APIVersion]*fieldpath.Set + + returnInputOnNoop bool } func (s *Updater) update(oldObject, newObject *typed.TypedValue, version fieldpath.APIVersion, managers fieldpath.ManagedFields, workflow string, force bool) (fieldpath.ManagedFields, *typed.Comparison, error) { @@ -126,12 +147,6 @@ func (s *Updater) Update(liveObject, newObject *typed.TypedValue, version fieldp if err != nil { return nil, fieldpath.ManagedFields{}, err } - if s.enableUnions { - newObject, err = liveObject.NormalizeUnions(newObject) - if err != nil { - return nil, fieldpath.ManagedFields{}, err - } - } managers, compare, err := s.update(liveObject, newObject, version, managers, manager, true) if err != nil { return nil, fieldpath.ManagedFields{}, err @@ -145,7 +160,7 @@ func (s *Updater) Update(liveObject, newObject *typed.TypedValue, version fieldp ignored = fieldpath.NewSet() } managers[manager] = fieldpath.NewVersionedSet( - managers[manager].Set().Union(compare.Modified).Union(compare.Added).Difference(compare.Removed).RecursiveDifference(ignored), + managers[manager].Set().Difference(compare.Removed).Union(compare.Modified).Union(compare.Added).RecursiveDifference(ignored), version, false, ) @@ -157,30 +172,17 @@ func (s *Updater) Update(liveObject, newObject *typed.TypedValue, version fieldp // Apply should be called when Apply is run, given the current object as // well as the configuration that is applied. This will merge the object -// and return it. If the object hasn't changed, nil is returned (the -// managers can still have changed though). +// and return it. func (s *Updater) Apply(liveObject, configObject *typed.TypedValue, version fieldpath.APIVersion, managers fieldpath.ManagedFields, manager string, force bool) (*typed.TypedValue, fieldpath.ManagedFields, error) { var err error managers, err = s.reconcileManagedFieldsWithSchemaChanges(liveObject, managers) if err != nil { return nil, fieldpath.ManagedFields{}, err } - if s.enableUnions { - configObject, err = configObject.NormalizeUnionsApply(configObject) - if err != nil { - return nil, fieldpath.ManagedFields{}, err - } - } newObject, err := liveObject.Merge(configObject) if err != nil { return nil, fieldpath.ManagedFields{}, fmt.Errorf("failed to merge config: %v", err) } - if s.enableUnions { - newObject, err = configObject.NormalizeUnionsApply(newObject) - if err != nil { - return nil, fieldpath.ManagedFields{}, err - } - } lastSet := managers[manager] set, err := configObject.ToFieldSet() if err != nil { @@ -200,11 +202,11 @@ func (s *Updater) Apply(liveObject, configObject *typed.TypedValue, version fiel if err != nil { return nil, fieldpath.ManagedFields{}, fmt.Errorf("failed to prune fields: %v", err) } - managers, compare, err := s.update(liveObject, newObject, version, managers, manager, force) + managers, _, err = s.update(liveObject, newObject, version, managers, manager, force) if err != nil { return nil, fieldpath.ManagedFields{}, err } - if compare.IsSame() { + if !s.returnInputOnNoop && value.EqualsUsing(value.NewFreelistAllocator(), liveObject.AsValue(), newObject.AsValue()) { newObject = nil } return newObject, managers, nil @@ -218,7 +220,8 @@ func (s *Updater) prune(merged *typed.TypedValue, managers fieldpath.ManagedFiel if lastSet == nil || lastSet.Set().Empty() { return merged, nil } - convertedMerged, err := s.Converter.Convert(merged, lastSet.APIVersion()) + version := lastSet.APIVersion() + convertedMerged, err := s.Converter.Convert(merged, version) if err != nil { if 
s.Converter.IsMissingVersionError(err) { return merged, nil @@ -228,7 +231,7 @@ func (s *Updater) prune(merged *typed.TypedValue, managers fieldpath.ManagedFiel sc, tr := convertedMerged.Schema(), convertedMerged.TypeRef() pruned := convertedMerged.RemoveItems(lastSet.Set().EnsureNamedFieldsAreMembers(sc, tr)) - pruned, err = s.addBackOwnedItems(convertedMerged, pruned, managers, applyingManager) + pruned, err = s.addBackOwnedItems(convertedMerged, pruned, version, managers, applyingManager) if err != nil { return nil, fmt.Errorf("failed add back owned items: %v", err) } @@ -241,7 +244,7 @@ func (s *Updater) prune(merged *typed.TypedValue, managers fieldpath.ManagedFiel // addBackOwnedItems adds back any fields, list and map items that were removed by prune, // but other appliers or updaters (or the current applier's new config) claim to own. -func (s *Updater) addBackOwnedItems(merged, pruned *typed.TypedValue, managedFields fieldpath.ManagedFields, applyingManager string) (*typed.TypedValue, error) { +func (s *Updater) addBackOwnedItems(merged, pruned *typed.TypedValue, prunedVersion fieldpath.APIVersion, managedFields fieldpath.ManagedFields, applyingManager string) (*typed.TypedValue, error) { var err error managedAtVersion := map[fieldpath.APIVersion]*fieldpath.Set{} for _, managerSet := range managedFields { @@ -252,7 +255,6 @@ func (s *Updater) addBackOwnedItems(merged, pruned *typed.TypedValue, managedFie } // Add back owned items at pruned version first to avoid conversion failure // caused by pruned fields which are required for conversion. - prunedVersion := fieldpath.APIVersion(*pruned.TypeRef().NamedType) if managed, ok := managedAtVersion[prunedVersion]; ok { merged, pruned, err = s.addBackOwnedItemsForVersion(merged, pruned, prunedVersion, managed) if err != nil { diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go index 7e5dc75827..5d3707a5b5 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go @@ -73,7 +73,7 @@ type Atom struct { } // Scalar (AKA "primitive") represents a type which has a single value which is -// either numeric, string, or boolean. +// either numeric, string, or boolean, or untyped for any of them. // // TODO: split numeric into float/int? Something even more fine-grained? 
type Scalar string @@ -82,6 +82,7 @@ const ( Numeric = Scalar("numeric") String = Scalar("string") Boolean = Scalar("boolean") + Untyped = Scalar("untyped") ) // ElementRelationship is an enum of the different possible relationships diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go index 7d64d1308c..6eb6c36df3 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go @@ -110,7 +110,7 @@ var SchemaSchemaYAML = `types: scalar: string - name: deduceInvalidDiscriminator type: - scalar: bool + scalar: boolean - name: fields type: list: @@ -145,6 +145,7 @@ var SchemaSchemaYAML = `types: list: elementType: scalar: string + elementRelationship: atomic - name: untyped map: fields: diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/compare.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/compare.go new file mode 100644 index 0000000000..ed483cbbc4 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/compare.go @@ -0,0 +1,460 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package typed + +import ( + "fmt" + "strings" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/schema" + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +// Comparison is the return value of a TypedValue.Compare() operation. +// +// No field will appear in more than one of the three fieldsets. If all of the +// fieldsets are empty, then the objects must have been equal. +type Comparison struct { + // Removed contains any fields removed by rhs (the right-hand-side + // object in the comparison). + Removed *fieldpath.Set + // Modified contains fields present in both objects but different. + Modified *fieldpath.Set + // Added contains any fields added by rhs. + Added *fieldpath.Set +} + +// IsSame returns true if the comparison returned no changes (the two +// compared objects are similar). +func (c *Comparison) IsSame() bool { + return c.Removed.Empty() && c.Modified.Empty() && c.Added.Empty() +} + +// String returns a human readable version of the comparison. 
+func (c *Comparison) String() string { + bld := strings.Builder{} + if !c.Modified.Empty() { + bld.WriteString(fmt.Sprintf("- Modified Fields:\n%v\n", c.Modified)) + } + if !c.Added.Empty() { + bld.WriteString(fmt.Sprintf("- Added Fields:\n%v\n", c.Added)) + } + if !c.Removed.Empty() { + bld.WriteString(fmt.Sprintf("- Removed Fields:\n%v\n", c.Removed)) + } + return bld.String() +} + +// ExcludeFields fields from the compare recursively removes the fields +// from the entire comparison +func (c *Comparison) ExcludeFields(fields *fieldpath.Set) *Comparison { + if fields == nil || fields.Empty() { + return c + } + c.Removed = c.Removed.RecursiveDifference(fields) + c.Modified = c.Modified.RecursiveDifference(fields) + c.Added = c.Added.RecursiveDifference(fields) + return c +} + +type compareWalker struct { + lhs value.Value + rhs value.Value + schema *schema.Schema + typeRef schema.TypeRef + + // Current path that we are comparing + path fieldpath.Path + + // Resulting comparison. + comparison *Comparison + + // internal housekeeping--don't set when constructing. + inLeaf bool // Set to true if we're in a "big leaf"--atomic map/list + + // Allocate only as many walkers as needed for the depth by storing them here. + spareWalkers *[]*compareWalker + + allocator value.Allocator +} + +// compare compares stuff. +func (w *compareWalker) compare(prefixFn func() string) (errs ValidationErrors) { + if w.lhs == nil && w.rhs == nil { + // check this condidition here instead of everywhere below. + return errorf("at least one of lhs and rhs must be provided") + } + a, ok := w.schema.Resolve(w.typeRef) + if !ok { + return errorf("schema error: no type found matching: %v", *w.typeRef.NamedType) + } + + alhs := deduceAtom(a, w.lhs) + arhs := deduceAtom(a, w.rhs) + + // deduceAtom does not fix the type for nil values + // nil is a wildcard and will accept whatever form the other operand takes + if w.rhs == nil { + errs = append(errs, handleAtom(alhs, w.typeRef, w)...) + } else if w.lhs == nil || alhs.Equals(&arhs) { + errs = append(errs, handleAtom(arhs, w.typeRef, w)...) + } else { + w2 := *w + errs = append(errs, handleAtom(alhs, w.typeRef, &w2)...) + errs = append(errs, handleAtom(arhs, w.typeRef, w)...) + } + + if !w.inLeaf { + if w.lhs == nil { + w.comparison.Added.Insert(w.path) + } else if w.rhs == nil { + w.comparison.Removed.Insert(w.path) + } + } + return errs.WithLazyPrefix(prefixFn) +} + +// doLeaf should be called on leaves before descending into children, if there +// will be a descent. It modifies w.inLeaf. +func (w *compareWalker) doLeaf() { + if w.inLeaf { + // We're in a "big leaf", an atomic map or list. Ignore + // subsequent leaves. + return + } + w.inLeaf = true + + // We don't recurse into leaf fields for merging. + if w.lhs == nil { + w.comparison.Added.Insert(w.path) + } else if w.rhs == nil { + w.comparison.Removed.Insert(w.path) + } else if !value.EqualsUsing(w.allocator, w.rhs, w.lhs) { + // TODO: Equality is not sufficient for this. + // Need to implement equality check on the value type. + w.comparison.Modified.Insert(w.path) + } +} + +func (w *compareWalker) doScalar(t *schema.Scalar) ValidationErrors { + // Make sure at least one side is a valid scalar. + lerrs := validateScalar(t, w.lhs, "lhs: ") + rerrs := validateScalar(t, w.rhs, "rhs: ") + if len(lerrs) > 0 && len(rerrs) > 0 { + return append(lerrs, rerrs...) + } + + // All scalars are leaf fields. 
+ w.doLeaf() + + return nil +} + +func (w *compareWalker) prepareDescent(pe fieldpath.PathElement, tr schema.TypeRef, cmp *Comparison) *compareWalker { + if w.spareWalkers == nil { + // first descent. + w.spareWalkers = &[]*compareWalker{} + } + var w2 *compareWalker + if n := len(*w.spareWalkers); n > 0 { + w2, *w.spareWalkers = (*w.spareWalkers)[n-1], (*w.spareWalkers)[:n-1] + } else { + w2 = &compareWalker{} + } + *w2 = *w + w2.typeRef = tr + w2.path = append(w2.path, pe) + w2.lhs = nil + w2.rhs = nil + w2.comparison = cmp + return w2 +} + +func (w *compareWalker) finishDescent(w2 *compareWalker) { + // if the descent caused a realloc, ensure that we reuse the buffer + // for the next sibling. + w.path = w2.path[:len(w2.path)-1] + *w.spareWalkers = append(*w.spareWalkers, w2) +} + +func (w *compareWalker) derefMap(prefix string, v value.Value) (value.Map, ValidationErrors) { + if v == nil { + return nil, nil + } + m, err := mapValue(w.allocator, v) + if err != nil { + return nil, errorf("%v: %v", prefix, err) + } + return m, nil +} + +func (w *compareWalker) visitListItems(t *schema.List, lhs, rhs value.List) (errs ValidationErrors) { + rLen := 0 + if rhs != nil { + rLen = rhs.Length() + } + lLen := 0 + if lhs != nil { + lLen = lhs.Length() + } + + maxLength := rLen + if lLen > maxLength { + maxLength = lLen + } + // Contains all the unique PEs between lhs and rhs, exactly once. + // Order doesn't matter since we're just tracking ownership in a set. + allPEs := make([]fieldpath.PathElement, 0, maxLength) + + // Gather all the elements from lhs, indexed by PE, in a list for duplicates. + lValues := fieldpath.MakePathElementMap(lLen) + for i := 0; i < lLen; i++ { + child := lhs.At(i) + pe, err := listItemToPathElement(w.allocator, w.schema, t, child) + if err != nil { + errs = append(errs, errorf("element %v: %v", i, err.Error())...) + // If we can't construct the path element, we can't + // even report errors deeper in the schema, so bail on + // this element. + continue + } + + if v, found := lValues.Get(pe); found { + list := v.([]value.Value) + lValues.Insert(pe, append(list, child)) + } else { + lValues.Insert(pe, []value.Value{child}) + allPEs = append(allPEs, pe) + } + } + + // Gather all the elements from rhs, indexed by PE, in a list for duplicates. + rValues := fieldpath.MakePathElementMap(rLen) + for i := 0; i < rLen; i++ { + rValue := rhs.At(i) + pe, err := listItemToPathElement(w.allocator, w.schema, t, rValue) + if err != nil { + errs = append(errs, errorf("element %v: %v", i, err.Error())...) + // If we can't construct the path element, we can't + // even report errors deeper in the schema, so bail on + // this element. + continue + } + if v, found := rValues.Get(pe); found { + list := v.([]value.Value) + rValues.Insert(pe, append(list, rValue)) + } else { + rValues.Insert(pe, []value.Value{rValue}) + if _, found := lValues.Get(pe); !found { + allPEs = append(allPEs, pe) + } + } + } + + for _, pe := range allPEs { + lList := []value.Value(nil) + if l, ok := lValues.Get(pe); ok { + lList = l.([]value.Value) + } + rList := []value.Value(nil) + if l, ok := rValues.Get(pe); ok { + rList = l.([]value.Value) + } + + switch { + case len(lList) == 0 && len(rList) == 0: + // We shouldn't be here anyway. + return + // Normal use-case: + // We have no duplicates for this PE, compare items one-to-one. 
+ case len(lList) <= 1 && len(rList) <= 1: + lValue := value.Value(nil) + if len(lList) != 0 { + lValue = lList[0] + } + rValue := value.Value(nil) + if len(rList) != 0 { + rValue = rList[0] + } + errs = append(errs, w.compareListItem(t, pe, lValue, rValue)...) + // Duplicates before & after use-case: + // Compare the duplicates lists as if they were atomic, mark modified if they changed. + case len(lList) >= 2 && len(rList) >= 2: + listEqual := func(lList, rList []value.Value) bool { + if len(lList) != len(rList) { + return false + } + for i := range lList { + if !value.Equals(lList[i], rList[i]) { + return false + } + } + return true + } + if !listEqual(lList, rList) { + w.comparison.Modified.Insert(append(w.path, pe)) + } + // Duplicates before & not anymore use-case: + // Rcursively add new non-duplicate items, Remove duplicate marker, + case len(lList) >= 2: + if len(rList) != 0 { + errs = append(errs, w.compareListItem(t, pe, nil, rList[0])...) + } + w.comparison.Removed.Insert(append(w.path, pe)) + // New duplicates use-case: + // Recursively remove old non-duplicate items, add duplicate marker. + case len(rList) >= 2: + if len(lList) != 0 { + errs = append(errs, w.compareListItem(t, pe, lList[0], nil)...) + } + w.comparison.Added.Insert(append(w.path, pe)) + } + } + + return +} + +func (w *compareWalker) indexListPathElements(t *schema.List, list value.List) ([]fieldpath.PathElement, fieldpath.PathElementValueMap, ValidationErrors) { + var errs ValidationErrors + length := 0 + if list != nil { + length = list.Length() + } + observed := fieldpath.MakePathElementValueMap(length) + pes := make([]fieldpath.PathElement, 0, length) + for i := 0; i < length; i++ { + child := list.At(i) + pe, err := listItemToPathElement(w.allocator, w.schema, t, child) + if err != nil { + errs = append(errs, errorf("element %v: %v", i, err.Error())...) + // If we can't construct the path element, we can't + // even report errors deeper in the schema, so bail on + // this element. + continue + } + // Ignore repeated occurences of `pe`. + if _, found := observed.Get(pe); found { + continue + } + observed.Insert(pe, child) + pes = append(pes, pe) + } + return pes, observed, errs +} + +func (w *compareWalker) compareListItem(t *schema.List, pe fieldpath.PathElement, lChild, rChild value.Value) ValidationErrors { + w2 := w.prepareDescent(pe, t.ElementType, w.comparison) + w2.lhs = lChild + w2.rhs = rChild + errs := w2.compare(pe.String) + w.finishDescent(w2) + return errs +} + +func (w *compareWalker) derefList(prefix string, v value.Value) (value.List, ValidationErrors) { + if v == nil { + return nil, nil + } + l, err := listValue(w.allocator, v) + if err != nil { + return nil, errorf("%v: %v", prefix, err) + } + return l, nil +} + +func (w *compareWalker) doList(t *schema.List) (errs ValidationErrors) { + lhs, _ := w.derefList("lhs: ", w.lhs) + if lhs != nil { + defer w.allocator.Free(lhs) + } + rhs, _ := w.derefList("rhs: ", w.rhs) + if rhs != nil { + defer w.allocator.Free(rhs) + } + + // If both lhs and rhs are empty/null, treat it as a + // leaf: this helps preserve the empty/null + // distinction. 
+ emptyPromoteToLeaf := (lhs == nil || lhs.Length() == 0) && (rhs == nil || rhs.Length() == 0) + + if t.ElementRelationship == schema.Atomic || emptyPromoteToLeaf { + w.doLeaf() + return nil + } + + if lhs == nil && rhs == nil { + return nil + } + + errs = w.visitListItems(t, lhs, rhs) + + return errs +} + +func (w *compareWalker) visitMapItem(t *schema.Map, out map[string]interface{}, key string, lhs, rhs value.Value) (errs ValidationErrors) { + fieldType := t.ElementType + if sf, ok := t.FindField(key); ok { + fieldType = sf.Type + } + pe := fieldpath.PathElement{FieldName: &key} + w2 := w.prepareDescent(pe, fieldType, w.comparison) + w2.lhs = lhs + w2.rhs = rhs + errs = append(errs, w2.compare(pe.String)...) + w.finishDescent(w2) + return errs +} + +func (w *compareWalker) visitMapItems(t *schema.Map, lhs, rhs value.Map) (errs ValidationErrors) { + out := map[string]interface{}{} + + value.MapZipUsing(w.allocator, lhs, rhs, value.Unordered, func(key string, lhsValue, rhsValue value.Value) bool { + errs = append(errs, w.visitMapItem(t, out, key, lhsValue, rhsValue)...) + return true + }) + + return errs +} + +func (w *compareWalker) doMap(t *schema.Map) (errs ValidationErrors) { + lhs, _ := w.derefMap("lhs: ", w.lhs) + if lhs != nil { + defer w.allocator.Free(lhs) + } + rhs, _ := w.derefMap("rhs: ", w.rhs) + if rhs != nil { + defer w.allocator.Free(rhs) + } + // If both lhs and rhs are empty/null, treat it as a + // leaf: this helps preserve the empty/null + // distinction. + emptyPromoteToLeaf := (lhs == nil || lhs.Empty()) && (rhs == nil || rhs.Empty()) + + if t.ElementRelationship == schema.Atomic || emptyPromoteToLeaf { + w.doLeaf() + return nil + } + + if lhs == nil && rhs == nil { + return nil + } + + errs = append(errs, w.visitMapItems(t, lhs, rhs)...) + + return errs +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go index 19c77334f6..78fdb0e75f 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go @@ -197,7 +197,7 @@ func getAssociativeKeyDefault(s *schema.Schema, list *schema.List, fieldName str return field.Default, nil } -func keyedAssociativeListItemToPathElement(a value.Allocator, s *schema.Schema, list *schema.List, index int, child value.Value) (fieldpath.PathElement, error) { +func keyedAssociativeListItemToPathElement(a value.Allocator, s *schema.Schema, list *schema.List, child value.Value) (fieldpath.PathElement, error) { pe := fieldpath.PathElement{} if child.IsNull() { // null entries are illegal. 
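For reference, a minimal usage sketch of the Comparison API introduced by the new typed/compare.go above. This sketch is not part of the vendored code; it assumes the deduced-type parser (typed.DeducedParseableType) exported by the same package, and uses the TypedValue.Compare method whose reworked implementation appears further down in the typed/typed.go hunks.

package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v4/typed"
)

func main() {
	// Both operands are parsed with the same (deduced) parser so they share a
	// schema and type reference, which the reworked Compare checks explicitly.
	lhs, err := typed.DeducedParseableType.FromYAML(typed.YAMLObject("a: 1\nb: old\n"))
	if err != nil {
		panic(err)
	}
	rhs, err := typed.DeducedParseableType.FromYAML(typed.YAMLObject("a: 1\nb: new\nc: 2\n"))
	if err != nil {
		panic(err)
	}

	cmp, err := lhs.Compare(rhs)
	if err != nil {
		panic(err)
	}
	// Comparison exposes Added, Modified and Removed field sets; IsSame()
	// is true only when all three are empty.
	fmt.Println(cmp.IsSame()) // false: ".b" is modified, ".c" is added
	fmt.Print(cmp.String())
}

The comparison no longer goes through the merge walker, so Compare can fail fast when the two operands were built from different schemas or type references, as the typed.go hunk below shows.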
@@ -225,7 +225,7 @@ func keyedAssociativeListItemToPathElement(a value.Allocator, s *schema.Schema, return pe, nil } -func setItemToPathElement(list *schema.List, index int, child value.Value) (fieldpath.PathElement, error) { +func setItemToPathElement(child value.Value) (fieldpath.PathElement, error) { pe := fieldpath.PathElement{} switch { case child.IsMap(): @@ -245,16 +245,15 @@ func setItemToPathElement(list *schema.List, index int, child value.Value) (fiel } } -func listItemToPathElement(a value.Allocator, s *schema.Schema, list *schema.List, index int, child value.Value) (fieldpath.PathElement, error) { - if list.ElementRelationship == schema.Associative { - if len(list.Keys) > 0 { - return keyedAssociativeListItemToPathElement(a, s, list, index, child) - } +func listItemToPathElement(a value.Allocator, s *schema.Schema, list *schema.List, child value.Value) (fieldpath.PathElement, error) { + if list.ElementRelationship != schema.Associative { + return fieldpath.PathElement{}, errors.New("invalid indexing of non-associative list") + } - // If there's no keys, then we must be a set of primitives. - return setItemToPathElement(list, index, child) + if len(list.Keys) > 0 { + return keyedAssociativeListItemToPathElement(a, s, list, child) } - // Use the index as a key for atomic lists. - return fieldpath.PathElement{Index: &index}, nil + // If there's no keys, then we must be a set of primitives. + return setItemToPathElement(child) } diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go index 9136440830..fa227ac405 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go @@ -113,11 +113,12 @@ func (w *mergingWalker) doLeaf() { w.rule(w) } -func (w *mergingWalker) doScalar(t *schema.Scalar) (errs ValidationErrors) { - errs = append(errs, validateScalar(t, w.lhs, "lhs: ")...) - errs = append(errs, validateScalar(t, w.rhs, "rhs: ")...) - if len(errs) > 0 { - return errs +func (w *mergingWalker) doScalar(t *schema.Scalar) ValidationErrors { + // Make sure at least one side is a valid scalar. + lerrs := validateScalar(t, w.lhs, "lhs: ") + rerrs := validateScalar(t, w.rhs, "rhs: ") + if len(lerrs) > 0 && len(rerrs) > 0 { + return append(lerrs, rerrs...) } // All scalars are leaf fields. @@ -179,14 +180,18 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err } out := make([]interface{}, 0, outLen) - rhsOrder, observedRHS, rhsErrs := w.indexListPathElements(t, rhs) + rhsPEs, observedRHS, rhsErrs := w.indexListPathElements(t, rhs, false) errs = append(errs, rhsErrs...) - lhsOrder, observedLHS, lhsErrs := w.indexListPathElements(t, lhs) + lhsPEs, observedLHS, lhsErrs := w.indexListPathElements(t, lhs, true) errs = append(errs, lhsErrs...) 
+ if len(errs) != 0 { + return errs + } + sharedOrder := make([]*fieldpath.PathElement, 0, rLen) - for i := range rhsOrder { - pe := &rhsOrder[i] + for i := range rhsPEs { + pe := &rhsPEs[i] if _, ok := observedLHS.Get(*pe); ok { sharedOrder = append(sharedOrder, pe) } @@ -198,13 +203,15 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err sharedOrder = sharedOrder[1:] } - lLen, rLen = len(lhsOrder), len(rhsOrder) + mergedRHS := fieldpath.MakePathElementMap(len(rhsPEs)) + lLen, rLen = len(lhsPEs), len(rhsPEs) for lI, rI := 0, 0; lI < lLen || rI < rLen; { if lI < lLen && rI < rLen { - pe := lhsOrder[lI] - if pe.Equals(rhsOrder[rI]) { + pe := lhsPEs[lI] + if pe.Equals(rhsPEs[rI]) { // merge LHS & RHS items - lChild, _ := observedLHS.Get(pe) + mergedRHS.Insert(pe, struct{}{}) + lChild, _ := observedLHS.Get(pe) // may be nil if the PE is duplicaated. rChild, _ := observedRHS.Get(pe) mergeOut, errs := w.mergeListItem(t, pe, lChild, rChild) errs = append(errs, errs...) @@ -221,17 +228,17 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err } continue } - if _, ok := observedRHS.Get(pe); ok && nextShared != nil && !nextShared.Equals(lhsOrder[lI]) { + if _, ok := observedRHS.Get(pe); ok && nextShared != nil && !nextShared.Equals(lhsPEs[lI]) { // shared item, but not the one we want in this round lI++ continue } } if lI < lLen { - pe := lhsOrder[lI] + pe := lhsPEs[lI] if _, ok := observedRHS.Get(pe); !ok { - // take LHS item - lChild, _ := observedLHS.Get(pe) + // take LHS item using At to make sure we get the right item (observed may not contain the right item). + lChild := lhs.AtUsing(w.allocator, lI) mergeOut, errs := w.mergeListItem(t, pe, lChild, nil) errs = append(errs, errs...) if mergeOut != nil { @@ -239,12 +246,16 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err } lI++ continue + } else if _, ok := mergedRHS.Get(pe); ok { + // we've already merged it with RHS, we don't want to duplicate it, skip it. + lI++ } } if rI < rLen { // Take the RHS item, merge with matching LHS item if possible - pe := rhsOrder[rI] - lChild, _ := observedLHS.Get(pe) // may be nil + pe := rhsPEs[rI] + mergedRHS.Insert(pe, struct{}{}) + lChild, _ := observedLHS.Get(pe) // may be nil if absent or duplicaated. rChild, _ := observedRHS.Get(pe) mergeOut, errs := w.mergeListItem(t, pe, lChild, rChild) errs = append(errs, errs...) @@ -271,7 +282,7 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err return errs } -func (w *mergingWalker) indexListPathElements(t *schema.List, list value.List) ([]fieldpath.PathElement, fieldpath.PathElementValueMap, ValidationErrors) { +func (w *mergingWalker) indexListPathElements(t *schema.List, list value.List, allowDuplicates bool) ([]fieldpath.PathElement, fieldpath.PathElementValueMap, ValidationErrors) { var errs ValidationErrors length := 0 if list != nil { @@ -281,7 +292,7 @@ func (w *mergingWalker) indexListPathElements(t *schema.List, list value.List) ( pes := make([]fieldpath.PathElement, 0, length) for i := 0; i < length; i++ { child := list.At(i) - pe, err := listItemToPathElement(w.allocator, w.schema, t, i, child) + pe, err := listItemToPathElement(w.allocator, w.schema, t, child) if err != nil { errs = append(errs, errorf("element %v: %v", i, err.Error())...) // If we can't construct the path element, we can't @@ -289,11 +300,15 @@ func (w *mergingWalker) indexListPathElements(t *schema.List, list value.List) ( // this element. 
continue } - if _, found := observed.Get(pe); found { + if _, found := observed.Get(pe); found && !allowDuplicates { errs = append(errs, errorf("duplicate entries for key %v", pe.String())...) continue + } else if !found { + observed.Insert(pe, child) + } else { + // Duplicated items are not merged with the new value, make them nil. + observed.Insert(pe, value.NewValueInterface(nil)) } - observed.Insert(pe, child) pes = append(pes, pe) } return pes, observed, errs diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go index 3949a78fc6..4258ee5bab 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go @@ -93,13 +93,13 @@ func (p ParseableType) IsValid() bool { // FromYAML parses a yaml string into an object with the current schema // and the type "typename" or an error if validation fails. -func (p ParseableType) FromYAML(object YAMLObject) (*TypedValue, error) { +func (p ParseableType) FromYAML(object YAMLObject, opts ...ValidationOptions) (*TypedValue, error) { var v interface{} err := yaml.Unmarshal([]byte(object), &v) if err != nil { return nil, err } - return AsTyped(value.NewValueInterface(v), p.Schema, p.TypeRef) + return AsTyped(value.NewValueInterface(v), p.Schema, p.TypeRef, opts...) } // FromUnstructured converts a go "interface{}" type, typically an @@ -108,8 +108,8 @@ func (p ParseableType) FromYAML(object YAMLObject) (*TypedValue, error) { // The provided interface{} must be one of: map[string]interface{}, // map[interface{}]interface{}, []interface{}, int types, float types, // string or boolean. Nested interface{} must also be one of these types. -func (p ParseableType) FromUnstructured(in interface{}) (*TypedValue, error) { - return AsTyped(value.NewValueInterface(in), p.Schema, p.TypeRef) +func (p ParseableType) FromUnstructured(in interface{}, opts ...ValidationOptions) (*TypedValue, error) { + return AsTyped(value.NewValueInterface(in), p.Schema, p.TypeRef, opts...) } // FromStructured converts a go "interface{}" type, typically an structured object in @@ -117,12 +117,12 @@ func (p ParseableType) FromUnstructured(in interface{}) (*TypedValue, error) { // schema validation. The provided "interface{}" value must be a pointer so that the // value can be modified via reflection. The provided "interface{}" may contain structs // and types that are converted to Values by the jsonMarshaler interface. -func (p ParseableType) FromStructured(in interface{}) (*TypedValue, error) { +func (p ParseableType) FromStructured(in interface{}, opts ...ValidationOptions) (*TypedValue, error) { v, err := value.NewValueReflect(in) if err != nil { return nil, fmt.Errorf("error creating struct value reflector: %v", err) } - return AsTyped(v, p.Schema, p.TypeRef) + return AsTyped(v, p.Schema, p.TypeRef, opts...) 
} // DeducedParseableType is a ParseableType that deduces the type from diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go index a338d761d4..ad071ee8f3 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go @@ -74,9 +74,9 @@ func (w *removingWalker) doList(t *schema.List) (errs ValidationErrors) { iter := l.RangeUsing(w.allocator) defer w.allocator.Free(iter) for iter.Next() { - i, item := iter.Item() + _, item := iter.Item() // Ignore error because we have already validated this list - pe, _ := listItemToPathElement(w.allocator, w.schema, t, i, item) + pe, _ := listItemToPathElement(w.allocator, w.schema, t, item) path, _ := fieldpath.MakePath(pe) // save items on the path when we shouldExtract // but ignore them when we are removing (i.e. !w.shouldExtract) diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/tofieldset.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/tofieldset.go index 047efff053..d563a87ee6 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/tofieldset.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/tofieldset.go @@ -94,9 +94,31 @@ func (v *toFieldSetWalker) doScalar(t *schema.Scalar) ValidationErrors { } func (v *toFieldSetWalker) visitListItems(t *schema.List, list value.List) (errs ValidationErrors) { + // Keeps track of the PEs we've seen + seen := fieldpath.MakePathElementSet(list.Length()) + // Keeps tracks of the PEs we've counted as duplicates + duplicates := fieldpath.MakePathElementSet(list.Length()) for i := 0; i < list.Length(); i++ { child := list.At(i) - pe, _ := listItemToPathElement(v.allocator, v.schema, t, i, child) + pe, _ := listItemToPathElement(v.allocator, v.schema, t, child) + if seen.Has(pe) { + if duplicates.Has(pe) { + // do nothing + } else { + v.set.Insert(append(v.path, pe)) + duplicates.Insert(pe) + } + } else { + seen.Insert(pe) + } + } + + for i := 0; i < list.Length(); i++ { + child := list.At(i) + pe, _ := listItemToPathElement(v.allocator, v.schema, t, child) + if duplicates.Has(pe) { + continue + } v2 := v.prepareDescent(pe, t.ElementType) v2.value = child errs = append(errs, v2.toFieldSet()...) diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go index d63a97fe20..9be9028280 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go @@ -17,8 +17,6 @@ limitations under the License. package typed import ( - "fmt" - "strings" "sync" "sigs.k8s.io/structured-merge-diff/v4/fieldpath" @@ -26,16 +24,24 @@ import ( "sigs.k8s.io/structured-merge-diff/v4/value" ) +// ValidationOptions is the list of all the options available when running the validation. +type ValidationOptions int + +const ( + // AllowDuplicates means that sets and associative lists can have duplicate similar items. + AllowDuplicates ValidationOptions = iota +) + // AsTyped accepts a value and a type and returns a TypedValue. 'v' must have // type 'typeName' in the schema. An error is returned if the v doesn't conform // to the schema. 
-func AsTyped(v value.Value, s *schema.Schema, typeRef schema.TypeRef) (*TypedValue, error) { +func AsTyped(v value.Value, s *schema.Schema, typeRef schema.TypeRef, opts ...ValidationOptions) (*TypedValue, error) { tv := &TypedValue{ value: v, typeRef: typeRef, schema: s, } - if err := tv.Validate(); err != nil { + if err := tv.Validate(opts...); err != nil { return nil, err } return tv, nil @@ -45,6 +51,10 @@ func AsTyped(v value.Value, s *schema.Schema, typeRef schema.TypeRef) (*TypedVal // conforms to the schema, for cases where that has already been checked or // where you're going to call a method that validates as a side-effect (like // ToFieldSet). +// +// Deprecated: This function was initially created because validation +// was expensive. Now that this has been solved, objects should always +// be created as validated, using `AsTyped`. func AsTypedUnvalidated(v value.Value, s *schema.Schema, typeRef schema.TypeRef) *TypedValue { tv := &TypedValue{ value: v, @@ -77,8 +87,14 @@ func (tv TypedValue) Schema() *schema.Schema { } // Validate returns an error with a list of every spec violation. -func (tv TypedValue) Validate() error { +func (tv TypedValue) Validate(opts ...ValidationOptions) error { w := tv.walker() + for _, opt := range opts { + switch opt { + case AllowDuplicates: + w.allowDuplicates = true + } + } defer w.finished() if errs := w.validate(nil); len(errs) != 0 { return errs @@ -113,6 +129,10 @@ func (tv TypedValue) Merge(pso *TypedValue) (*TypedValue, error) { return merge(&tv, pso, ruleKeepRHS, nil) } +var cmpwPool = sync.Pool{ + New: func() interface{} { return &compareWalker{} }, +} + // Compare compares the two objects. See the comments on the `Comparison` // struct for details on the return value. // @@ -120,33 +140,44 @@ func (tv TypedValue) Merge(pso *TypedValue) (*TypedValue, error) { // match), or an error will be returned. Validation errors will be returned if // the objects don't conform to the schema. func (tv TypedValue) Compare(rhs *TypedValue) (c *Comparison, err error) { - c = &Comparison{ + lhs := tv + if lhs.schema != rhs.schema { + return nil, errorf("expected objects with types from the same schema") + } + if !lhs.typeRef.Equals(&rhs.typeRef) { + return nil, errorf("expected objects of the same type, but got %v and %v", lhs.typeRef, rhs.typeRef) + } + + cmpw := cmpwPool.Get().(*compareWalker) + defer func() { + cmpw.lhs = nil + cmpw.rhs = nil + cmpw.schema = nil + cmpw.typeRef = schema.TypeRef{} + cmpw.comparison = nil + cmpw.inLeaf = false + + cmpwPool.Put(cmpw) + }() + + cmpw.lhs = lhs.value + cmpw.rhs = rhs.value + cmpw.schema = lhs.schema + cmpw.typeRef = lhs.typeRef + cmpw.comparison = &Comparison{ Removed: fieldpath.NewSet(), Modified: fieldpath.NewSet(), Added: fieldpath.NewSet(), } - _, err = merge(&tv, rhs, func(w *mergingWalker) { - if w.lhs == nil { - c.Added.Insert(w.path) - } else if w.rhs == nil { - c.Removed.Insert(w.path) - } else if !value.Equals(w.rhs, w.lhs) { - // TODO: Equality is not sufficient for this. - // Need to implement equality check on the value type. - c.Modified.Insert(w.path) - } - }, func(w *mergingWalker) { - if w.lhs == nil { - c.Added.Insert(w.path) - } else if w.rhs == nil { - c.Removed.Insert(w.path) - } - }) - if err != nil { - return nil, err + if cmpw.allocator == nil { + cmpw.allocator = value.NewFreelistAllocator() } - return c, nil + errs := cmpw.compare(nil) + if len(errs) > 0 { + return nil, errs + } + return cmpw.comparison, nil } // RemoveItems removes each provided list or map item from the value. 
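For reference, a minimal sketch of the new AllowDuplicates validation option that this bump threads through AsTyped, Validate and the ParseableType constructors. It is not part of the vendored code; the hand-written schema is illustrative only, and typed.NewParser plus Parser.Type are assumed from the same package since they do not appear in these hunks. The duplicate-entry error text comes from the validate.go hunk further below.

package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v4/typed"
)

// A small schema with one associative list keyed by "name".
const schemaYAML = `types:
- name: root
  map:
    fields:
    - name: items
      type:
        list:
          elementType:
            map:
              fields:
              - name: name
                type:
                  scalar: string
              - name: value
                type:
                  scalar: numeric
          elementRelationship: associative
          keys: ["name"]
`

func main() {
	parser, err := typed.NewParser(typed.YAMLObject(schemaYAML))
	if err != nil {
		panic(err)
	}
	// Two entries share the key "a", so strict validation rejects the object.
	obj := typed.YAMLObject("items:\n- name: a\n  value: 1\n- name: a\n  value: 2\n")

	_, err = parser.Type("root").FromYAML(obj)
	fmt.Println(err != nil) // true: "duplicate entries for key ..."

	// With AllowDuplicates the same object validates; duplicated keys are then
	// handled as described in the merge/compare hunks above.
	_, err = parser.Type("root").FromYAML(obj, typed.AllowDuplicates)
	fmt.Println(err == nil) // true
}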
@@ -161,63 +192,6 @@ func (tv TypedValue) ExtractItems(items *fieldpath.Set) *TypedValue { return &tv } -// NormalizeUnions takes the new object and normalizes the union: -// - If discriminator changed to non-nil, and a new field has been added -// that doesn't match, an error is returned, -// - If discriminator hasn't changed and two fields or more are set, an -// error is returned, -// - If discriminator changed to non-nil, all other fields but the -// discriminated one will be cleared, -// - Otherwise, If only one field is left, update discriminator to that value. -// -// Please note: union behavior isn't finalized yet and this is still experimental. -func (tv TypedValue) NormalizeUnions(new *TypedValue) (*TypedValue, error) { - var errs ValidationErrors - var normalizeFn = func(w *mergingWalker) { - if w.rhs != nil { - v := w.rhs.Unstructured() - w.out = &v - } - if err := normalizeUnions(w); err != nil { - errs = append(errs, errorf(err.Error())...) - } - } - out, mergeErrs := merge(&tv, new, func(w *mergingWalker) {}, normalizeFn) - if mergeErrs != nil { - errs = append(errs, mergeErrs.(ValidationErrors)...) - } - if len(errs) > 0 { - return nil, errs - } - return out, nil -} - -// NormalizeUnionsApply specifically normalize unions on apply. It -// validates that the applied union is correct (there should be no -// ambiguity there), and clear the fields according to the sent intent. -// -// Please note: union behavior isn't finalized yet and this is still experimental. -func (tv TypedValue) NormalizeUnionsApply(new *TypedValue) (*TypedValue, error) { - var errs ValidationErrors - var normalizeFn = func(w *mergingWalker) { - if w.rhs != nil { - v := w.rhs.Unstructured() - w.out = &v - } - if err := normalizeUnionsApply(w); err != nil { - errs = append(errs, errorf(err.Error())...) - } - } - out, mergeErrs := merge(&tv, new, func(w *mergingWalker) {}, normalizeFn) - if mergeErrs != nil { - errs = append(errs, mergeErrs.(ValidationErrors)...) - } - if len(errs) > 0 { - return nil, errs - } - return out, nil -} - func (tv TypedValue) Empty() *TypedValue { tv.value = value.NewValueInterface(nil) return &tv @@ -273,50 +247,3 @@ func merge(lhs, rhs *TypedValue, rule, postRule mergeRule) (*TypedValue, error) } return out, nil } - -// Comparison is the return value of a TypedValue.Compare() operation. -// -// No field will appear in more than one of the three fieldsets. If all of the -// fieldsets are empty, then the objects must have been equal. -type Comparison struct { - // Removed contains any fields removed by rhs (the right-hand-side - // object in the comparison). - Removed *fieldpath.Set - // Modified contains fields present in both objects but different. - Modified *fieldpath.Set - // Added contains any fields added by rhs. - Added *fieldpath.Set -} - -// IsSame returns true if the comparison returned no changes (the two -// compared objects are similar). -func (c *Comparison) IsSame() bool { - return c.Removed.Empty() && c.Modified.Empty() && c.Added.Empty() -} - -// String returns a human readable version of the comparison. 
-func (c *Comparison) String() string { - bld := strings.Builder{} - if !c.Modified.Empty() { - bld.WriteString(fmt.Sprintf("- Modified Fields:\n%v\n", c.Modified)) - } - if !c.Added.Empty() { - bld.WriteString(fmt.Sprintf("- Added Fields:\n%v\n", c.Added)) - } - if !c.Removed.Empty() { - bld.WriteString(fmt.Sprintf("- Removed Fields:\n%v\n", c.Removed)) - } - return bld.String() -} - -// ExcludeFields fields from the compare recursively removes the fields -// from the entire comparison -func (c *Comparison) ExcludeFields(fields *fieldpath.Set) *Comparison { - if fields == nil || fields.Empty() { - return c - } - c.Removed = c.Removed.RecursiveDifference(fields) - c.Modified = c.Modified.RecursiveDifference(fields) - c.Added = c.Added.RecursiveDifference(fields) - return c -} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/union.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/union.go deleted file mode 100644 index 1fa5d88ae6..0000000000 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/union.go +++ /dev/null @@ -1,276 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package typed - -import ( - "fmt" - "strings" - - "sigs.k8s.io/structured-merge-diff/v4/schema" - "sigs.k8s.io/structured-merge-diff/v4/value" -) - -func normalizeUnions(w *mergingWalker) error { - atom, found := w.schema.Resolve(w.typeRef) - if !found { - panic(fmt.Sprintf("Unable to resolve schema in normalize union: %v/%v", w.schema, w.typeRef)) - } - // Unions can only be in structures, and the struct must not have been removed - if atom.Map == nil || w.out == nil { - return nil - } - - var old value.Map - if w.lhs != nil && !w.lhs.IsNull() { - old = w.lhs.AsMap() - } - for _, union := range atom.Map.Unions { - if err := newUnion(&union).Normalize(old, w.rhs.AsMap(), value.NewValueInterface(*w.out).AsMap()); err != nil { - return err - } - } - return nil -} - -func normalizeUnionsApply(w *mergingWalker) error { - atom, found := w.schema.Resolve(w.typeRef) - if !found { - panic(fmt.Sprintf("Unable to resolve schema in normalize union: %v/%v", w.schema, w.typeRef)) - } - // Unions can only be in structures, and the struct must not have been removed - if atom.Map == nil || w.out == nil { - return nil - } - - var old value.Map - if w.lhs != nil && !w.lhs.IsNull() { - old = w.lhs.AsMap() - } - - for _, union := range atom.Map.Unions { - out := value.NewValueInterface(*w.out) - if err := newUnion(&union).NormalizeApply(old, w.rhs.AsMap(), out.AsMap()); err != nil { - return err - } - *w.out = out.Unstructured() - } - return nil -} - -type discriminated string -type field string - -type discriminatedNames struct { - f2d map[field]discriminated - d2f map[discriminated]field -} - -func newDiscriminatedName(f2d map[field]discriminated) discriminatedNames { - d2f := map[discriminated]field{} - for key, value := range f2d { - d2f[value] = key - } - return discriminatedNames{ - f2d: f2d, - d2f: d2f, - } -} - -func (dn discriminatedNames) toField(d discriminated) field { - if 
f, ok := dn.d2f[d]; ok { - return f - } - return field(d) -} - -func (dn discriminatedNames) toDiscriminated(f field) discriminated { - if d, ok := dn.f2d[f]; ok { - return d - } - return discriminated(f) -} - -type discriminator struct { - name string -} - -func (d *discriminator) Set(m value.Map, v discriminated) { - if d == nil { - return - } - m.Set(d.name, value.NewValueInterface(string(v))) -} - -func (d *discriminator) Get(m value.Map) discriminated { - if d == nil || m == nil { - return "" - } - val, ok := m.Get(d.name) - if !ok { - return "" - } - if !val.IsString() { - return "" - } - return discriminated(val.AsString()) -} - -type fieldsSet map[field]struct{} - -// newFieldsSet returns a map of the fields that are part of the union and are set -// in the given map. -func newFieldsSet(m value.Map, fields []field) fieldsSet { - if m == nil { - return nil - } - set := fieldsSet{} - for _, f := range fields { - if subField, ok := m.Get(string(f)); ok && !subField.IsNull() { - set.Add(f) - } - } - return set -} - -func (fs fieldsSet) Add(f field) { - if fs == nil { - fs = map[field]struct{}{} - } - fs[f] = struct{}{} -} - -func (fs fieldsSet) One() *field { - for f := range fs { - return &f - } - return nil -} - -func (fs fieldsSet) Has(f field) bool { - _, ok := fs[f] - return ok -} - -func (fs fieldsSet) List() []field { - fields := []field{} - for f := range fs { - fields = append(fields, f) - } - return fields -} - -func (fs fieldsSet) Difference(o fieldsSet) fieldsSet { - n := fieldsSet{} - for f := range fs { - if !o.Has(f) { - n.Add(f) - } - } - return n -} - -func (fs fieldsSet) String() string { - s := []string{} - for k := range fs { - s = append(s, string(k)) - } - return strings.Join(s, ", ") -} - -type union struct { - deduceInvalidDiscriminator bool - d *discriminator - dn discriminatedNames - f []field -} - -func newUnion(su *schema.Union) *union { - u := &union{} - if su.Discriminator != nil { - u.d = &discriminator{name: *su.Discriminator} - } - f2d := map[field]discriminated{} - for _, f := range su.Fields { - u.f = append(u.f, field(f.FieldName)) - f2d[field(f.FieldName)] = discriminated(f.DiscriminatorValue) - } - u.dn = newDiscriminatedName(f2d) - u.deduceInvalidDiscriminator = su.DeduceInvalidDiscriminator - return u -} - -// clear removes all the fields in map that are part of the union, but -// the one we decided to keep. -func (u *union) clear(m value.Map, f field) { - for _, fieldName := range u.f { - if field(fieldName) != f { - m.Delete(string(fieldName)) - } - } -} - -func (u *union) Normalize(old, new, out value.Map) error { - os := newFieldsSet(old, u.f) - ns := newFieldsSet(new, u.f) - diff := ns.Difference(os) - - if u.d.Get(old) != u.d.Get(new) && u.d.Get(new) != "" { - if len(diff) == 1 && u.d.Get(new) != u.dn.toDiscriminated(*diff.One()) { - return fmt.Errorf("discriminator (%v) and field changed (%v) don't match", u.d.Get(new), diff.One()) - } - if len(diff) > 1 { - return fmt.Errorf("multiple new fields added: %v", diff) - } - u.clear(out, u.dn.toField(u.d.Get(new))) - return nil - } - - if len(ns) > 1 { - return fmt.Errorf("multiple fields set without discriminator change: %v", ns) - } - - // Set discriminiator if it needs to be deduced. 
- if u.deduceInvalidDiscriminator && len(ns) == 1 { - u.d.Set(out, u.dn.toDiscriminated(*ns.One())) - } - - return nil -} - -func (u *union) NormalizeApply(applied, merged, out value.Map) error { - as := newFieldsSet(applied, u.f) - if len(as) > 1 { - return fmt.Errorf("more than one field of union applied: %v", as) - } - if len(as) == 0 { - // None is set, just leave. - return nil - } - // We have exactly one, discriminiator must match if set - if u.d.Get(applied) != "" && u.d.Get(applied) != u.dn.toDiscriminated(*as.One()) { - return fmt.Errorf("applied discriminator (%v) doesn't match applied field (%v)", u.d.Get(applied), *as.One()) - } - - // Update discriminiator if needed - if u.deduceInvalidDiscriminator { - u.d.Set(out, u.dn.toDiscriminated(*as.One())) - } - // Clear others fields. - u.clear(out, *as.One()) - - return nil -} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/validate.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/validate.go index 378d30219c..652e24c819 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/validate.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/validate.go @@ -33,6 +33,7 @@ func (tv TypedValue) walker() *validatingObjectWalker { v.value = tv.value v.schema = tv.schema v.typeRef = tv.typeRef + v.allowDuplicates = false if v.allocator == nil { v.allocator = value.NewFreelistAllocator() } @@ -49,6 +50,9 @@ type validatingObjectWalker struct { value value.Value schema *schema.Schema typeRef schema.TypeRef + // If set to true, duplicates will be allowed in + // associativeLists/sets. + allowDuplicates bool // Allocate only as many walkers as needed for the depth by storing them here. spareWalkers *[]*validatingObjectWalker @@ -102,6 +106,12 @@ func validateScalar(t *schema.Scalar, v value.Value, prefix string) (errs Valida if !v.IsBool() { return errorf("%vexpected boolean, got %v", prefix, v) } + case schema.Untyped: + if !v.IsFloat() && !v.IsInt() && !v.IsString() && !v.IsBool() { + return errorf("%vexpected any scalar, got %v", prefix, v) + } + default: + return errorf("%vunexpected scalar type in schema: %v", prefix, *t) } return nil } @@ -123,7 +133,7 @@ func (v *validatingObjectWalker) visitListItems(t *schema.List, list value.List) pe.Index = &i } else { var err error - pe, err = listItemToPathElement(v.allocator, v.schema, t, i, child) + pe, err = listItemToPathElement(v.allocator, v.schema, t, child) if err != nil { errs = append(errs, errorf("element %v: %v", i, err.Error())...) // If we can't construct the path element, we can't @@ -131,7 +141,7 @@ func (v *validatingObjectWalker) visitListItems(t *schema.List, list value.List) // this element. return } - if observedKeys.Has(pe) { + if observedKeys.Has(pe) && !v.allowDuplicates { errs = append(errs, errorf("duplicate entries for key %v", pe.String())...) 
} observedKeys.Insert(pe) diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapreflect.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapreflect.go index dc8b8c7200..c38402b99a 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapreflect.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapreflect.go @@ -136,7 +136,7 @@ func (r mapReflect) EqualsUsing(a Allocator, m Map) bool { if !ok { return false } - return Equals(vr.mustReuse(lhsVal, entry, nil, nil), value) + return EqualsUsing(a, vr.mustReuse(lhsVal, entry, nil, nil), value) }) } diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapunstructured.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapunstructured.go index d8e208628d..c3ae00b180 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapunstructured.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapunstructured.go @@ -88,12 +88,12 @@ func (m mapUnstructuredInterface) EqualsUsing(a Allocator, other Map) bool { } vv := a.allocValueUnstructured() defer a.Free(vv) - return other.Iterate(func(key string, value Value) bool { + return other.IterateUsing(a, func(key string, value Value) bool { lhsVal, ok := m[key] if !ok { return false } - return Equals(vv.reuse(lhsVal), value) + return EqualsUsing(a, vv.reuse(lhsVal), value) }) } @@ -168,12 +168,12 @@ func (m mapUnstructuredString) EqualsUsing(a Allocator, other Map) bool { } vv := a.allocValueUnstructured() defer a.Free(vv) - return other.Iterate(func(key string, value Value) bool { + return other.IterateUsing(a, func(key string, value Value) bool { lhsVal, ok := m[key] if !ok { return false } - return Equals(vv.reuse(lhsVal), value) + return EqualsUsing(a, vv.reuse(lhsVal), value) }) } diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go index a5a467c0f0..f0d58d42cb 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go @@ -154,7 +154,9 @@ func buildStructCacheEntry(t reflect.Type, infos map[string]*FieldCacheEntry, fi if field.Type.Kind() == reflect.Ptr { e = field.Type.Elem() } - buildStructCacheEntry(e, infos, append(fieldPath, field.Index)) + if e.Kind() == reflect.Struct { + buildStructCacheEntry(e, infos, append(fieldPath, field.Index)) + } continue } info := &FieldCacheEntry{JsonName: jsonName, isOmitEmpty: isOmitempty, fieldPath: append(fieldPath, field.Index), fieldType: field.Type} diff --git a/vendor/sigs.k8s.io/yaml/LICENSE b/vendor/sigs.k8s.io/yaml/LICENSE index 7805d36de7..093d6d3edf 100644 --- a/vendor/sigs.k8s.io/yaml/LICENSE +++ b/vendor/sigs.k8s.io/yaml/LICENSE @@ -48,3 +48,259 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +# The forked go-yaml.v3 library under this project is covered by two +different licenses (MIT and Apache): + +#### MIT License #### + +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original MIT license, with the additional +copyright staring in 2011 when the project was ported over: + + apic.go emitterc.go parserc.go readerc.go scannerc.go + writerc.go yamlh.go yamlprivateh.go + +Copyright (c) 2006-2010 Kirill Simonov +Copyright (c) 2006-2011 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +### Apache License ### + +All the remaining project files are covered by the Apache license: + +Copyright (c) 2011-2019 Canonical Ltd + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +# The forked go-yaml.v2 library under the project is covered by an +Apache license: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/sigs.k8s.io/yaml/OWNERS b/vendor/sigs.k8s.io/yaml/OWNERS index 325b40b076..003a149e15 100644 --- a/vendor/sigs.k8s.io/yaml/OWNERS +++ b/vendor/sigs.k8s.io/yaml/OWNERS @@ -2,26 +2,22 @@ approvers: - dims -- lavalamp +- jpbetz - smarterclayton - deads2k - sttts - liggitt -- caesarxuchao reviewers: - dims - thockin -- lavalamp +- jpbetz - smarterclayton - wojtek-t - deads2k - derekwaynecarr -- caesarxuchao - mikedanese - liggitt -- gmarek - sttts -- ncdc - tallclair labels: - sig/api-machinery diff --git a/vendor/sigs.k8s.io/yaml/fields.go b/vendor/sigs.k8s.io/yaml/fields.go index 235b7f2cf6..0ea28bd030 100644 --- a/vendor/sigs.k8s.io/yaml/fields.go +++ b/vendor/sigs.k8s.io/yaml/fields.go @@ -16,53 +16,53 @@ import ( "unicode/utf8" ) -// indirect walks down v allocating pointers as needed, +// indirect walks down 'value' allocating pointers as needed, // until it gets to a non-pointer. // if it encounters an Unmarshaler, indirect stops and returns that. 
// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. -func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { - // If v is a named type and is addressable, +func indirect(value reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // If 'value' is a named type and is addressable, // start with its address, so that if the type has pointer methods, // we find them. - if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { - v = v.Addr() + if value.Kind() != reflect.Ptr && value.Type().Name() != "" && value.CanAddr() { + value = value.Addr() } for { // Load value from interface, but only if the result will be // usefully addressable. - if v.Kind() == reflect.Interface && !v.IsNil() { - e := v.Elem() - if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { - v = e + if value.Kind() == reflect.Interface && !value.IsNil() { + element := value.Elem() + if element.Kind() == reflect.Ptr && !element.IsNil() && (!decodingNull || element.Elem().Kind() == reflect.Ptr) { + value = element continue } } - if v.Kind() != reflect.Ptr { + if value.Kind() != reflect.Ptr { break } - if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { + if value.Elem().Kind() != reflect.Ptr && decodingNull && value.CanSet() { break } - if v.IsNil() { - if v.CanSet() { - v.Set(reflect.New(v.Type().Elem())) + if value.IsNil() { + if value.CanSet() { + value.Set(reflect.New(value.Type().Elem())) } else { - v = reflect.New(v.Type().Elem()) + value = reflect.New(value.Type().Elem()) } } - if v.Type().NumMethod() > 0 { - if u, ok := v.Interface().(json.Unmarshaler); ok { + if value.Type().NumMethod() > 0 { + if u, ok := value.Interface().(json.Unmarshaler); ok { return u, nil, reflect.Value{} } - if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + if u, ok := value.Interface().(encoding.TextUnmarshaler); ok { return nil, u, reflect.Value{} } } - v = v.Elem() + value = value.Elem() } - return nil, nil, v + return nil, nil, value } // A field represents a single field found in a struct. @@ -134,8 +134,8 @@ func typeFields(t reflect.Type) []field { next := []field{{typ: t}} // Count of queued names for current level and the next. - count := map[reflect.Type]int{} - nextCount := map[reflect.Type]int{} + var count map[reflect.Type]int + var nextCount map[reflect.Type]int // Types already visited at an earlier level. visited := map[reflect.Type]bool{} @@ -348,8 +348,9 @@ const ( // 4) simpleLetterEqualFold, no specials, no non-letters. 
// // The letters S and K are special because they map to 3 runes, not just 2: -// * S maps to s and to U+017F 'ſ' Latin small letter long s -// * k maps to K and to U+212A 'K' Kelvin sign +// - S maps to s and to U+017F 'ſ' Latin small letter long s +// - k maps to K and to U+212A 'K' Kelvin sign +// // See http://play.golang.org/p/tTxjOc0OGo // // The returned function is specialized for matching against s and @@ -420,10 +421,8 @@ func equalFoldRight(s, t []byte) bool { t = t[size:] } - if len(t) > 0 { - return false - } - return true + + return len(t) <= 0 } // asciiEqualFold is a specialization of bytes.EqualFold for use when diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE b/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE new file mode 100644 index 0000000000..8dada3edaf --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml b/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml new file mode 100644 index 0000000000..8da58fbf6f --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml @@ -0,0 +1,31 @@ +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original copyright and license: + + apic.go + emitterc.go + parserc.go + readerc.go + scannerc.go + writerc.go + yamlh.go + yamlprivateh.go + +Copyright (c) 2006 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE b/vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE new file mode 100644 index 0000000000..866d74a7ad --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE @@ -0,0 +1,13 @@ +Copyright 2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS b/vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS new file mode 100644 index 0000000000..73be0a3a9b --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS @@ -0,0 +1,24 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- dims +- jpbetz +- smarterclayton +- deads2k +- sttts +- liggitt +- natasha41575 +- knverey +reviewers: +- dims +- thockin +- jpbetz +- smarterclayton +- deads2k +- derekwaynecarr +- mikedanese +- liggitt +- sttts +- tallclair +labels: +- sig/api-machinery diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md b/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md new file mode 100644 index 0000000000..53f4139dc3 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md @@ -0,0 +1,143 @@ +# go-yaml fork + +This package is a fork of the go-yaml library and is intended solely for consumption +by kubernetes projects. In this fork, we plan to support only critical changes required for +kubernetes, such as small bug fixes and regressions. Larger, general-purpose feature requests +should be made in the upstream go-yaml library, and we will reject such changes in this fork +unless we are pulling them from upstream. + +This fork is based on v2.4.0: https://github.com/go-yaml/yaml/releases/tag/v2.4.0 + +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. + +Compatibility +------------- + +The yaml package supports most of YAML 1.1 and 1.2, including support for +anchors, tags, map merging, etc. Multi-document unmarshalling is not yet +implemented, and base-60 floats from YAML 1.1 are purposefully not +supported since they're a poor design and are gone in YAML 1.2. + +Installation and usage +---------------------- + +The import path for the package is *gopkg.in/yaml.v2*. + +To install it, run: + + go get gopkg.in/yaml.v2 + +API documentation +----------------- + +If opened in a browser, the import path itself leads to the API documentation: + + * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) + +API stability +------------- + +The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). + + +License +------- + +The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. + + +Example +------- + +```Go +package main + +import ( + "fmt" + "log" + + "gopkg.in/yaml.v2" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +// Note: struct fields must be public in order for unmarshal to +// correctly populate the data. 
+type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go new file mode 100644 index 0000000000..acf71402cf --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go @@ -0,0 +1,744 @@ +package yaml + +import ( + "io" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// Reader read handler. +func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) +} + +// Set a string input. +func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_reader_read_handler + parser.input_reader = r +} + +// Set the source encoding. 
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +var disableLineWrapping = false + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + } + if disableLineWrapping { + emitter.best_width = -1 + } +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// yaml_writer_write_handler uses emitter.output_writer to write the +// emitted text. +func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_writer.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_writer_write_handler + emitter.output_writer = w +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +//// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. 
+// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize( + event *yaml_event_t, + version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, + implicit bool, +) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } +} + +///* +// * Create ALIAS. +// */ +// +//YAML_DECLARE(int) +//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) +//{ +// mark yaml_mark_t = { 0, 0, 0 } +// anchor_copy *yaml_char_t = NULL +// +// assert(event) // Non-NULL event object is expected. +// assert(anchor) // Non-NULL anchor is expected. +// +// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 +// +// anchor_copy = yaml_strdup(anchor) +// if (!anchor_copy) +// return 0 +// +// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) +// +// return 1 +//} + +// Create SCALAR. 
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. 
+// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. 
+// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go new file mode 100644 index 0000000000..129bc2a97d --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go @@ -0,0 +1,815 @@ +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "io" + "math" + "reflect" + "strconv" + "time" +) + +const ( + documentNode = 1 << iota + mappingNode + sequenceNode + scalarNode + aliasNode +) + +type node struct { + kind int + line, column int + tag string + // For an alias node, alias holds the resolved alias. + alias *node + value string + implicit bool + children []*node + anchors map[string]*node +} + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. 
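The parser below builds this node tree from the event stream, and the decoder then reflects it into Go values. As an editorial aside (not part of the vendored file), here is a minimal sketch of the public entry point, written against the upstream gopkg.in/yaml.v2 API that this goyaml.v2 package tracks; the `Config` type and its fields are illustrative only.

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v2"
)

// Config is a made-up target type for illustration.
type Config struct {
	Name     string            `yaml:"name"`
	Replicas int               `yaml:"replicas"`
	Labels   map[string]string `yaml:"labels"`
}

func main() {
	data := []byte(`
name: hubble
replicas: 2
labels:
  app: cilium
`)
	var cfg Config
	// Unmarshal drives the parser/decoder defined in this file.
	if err := yaml.Unmarshal(data, &cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", cfg)
}
```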
+ +type parser struct { + parser yaml_parser_t + event yaml_event_t + doc *node + doneInit bool +} + +func newParser(b []byte) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + if len(b) == 0 { + b = []byte{'\n'} + } + yaml_parser_set_input_string(&p.parser, b) + return &p +} + +func newParserFromReader(r io.Reader) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + yaml_parser_set_input_reader(&p.parser, r) + return &p +} + +func (p *parser) init() { + if p.doneInit { + return + } + p.expect(yaml_STREAM_START_EVENT) + p.doneInit = true +} + +func (p *parser) destroy() { + if p.event.typ != yaml_NO_EVENT { + yaml_event_delete(&p.event) + } + yaml_parser_delete(&p.parser) +} + +// expect consumes an event from the event stream and +// checks that it's of the expected type. +func (p *parser) expect(e yaml_event_type_t) { + if p.event.typ == yaml_NO_EVENT { + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + } + if p.event.typ == yaml_STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + if p.event.typ != e { + p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) + p.fail() + } + yaml_event_delete(&p.event) + p.event.typ = yaml_NO_EVENT +} + +// peek peeks at the next event in the event stream, +// puts the results into p.event and returns the event type. +func (p *parser) peek() yaml_event_type_t { + if p.event.typ != yaml_NO_EVENT { + return p.event.typ + } + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + return p.event.typ +} + +func (p *parser) fail() { + var where string + var line int + if p.parser.problem_mark.line != 0 { + line = p.parser.problem_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } else if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + } + if line != 0 { + where = "line " + strconv.Itoa(line) + ": " + } + var msg string + if len(p.parser.problem) > 0 { + msg = p.parser.problem + } else { + msg = "unknown problem parsing YAML content" + } + failf("%s%s", where, msg) +} + +func (p *parser) anchor(n *node, anchor []byte) { + if anchor != nil { + p.doc.anchors[string(anchor)] = n + } +} + +func (p *parser) parse() *node { + p.init() + switch p.peek() { + case yaml_SCALAR_EVENT: + return p.scalar() + case yaml_ALIAS_EVENT: + return p.alias() + case yaml_MAPPING_START_EVENT: + return p.mapping() + case yaml_SEQUENCE_START_EVENT: + return p.sequence() + case yaml_DOCUMENT_START_EVENT: + return p.document() + case yaml_STREAM_END_EVENT: + // Happens when attempting to decode an empty buffer. 
+ return nil + default: + panic("attempted to parse unknown event: " + p.event.typ.String()) + } +} + +func (p *parser) node(kind int) *node { + return &node{ + kind: kind, + line: p.event.start_mark.line, + column: p.event.start_mark.column, + } +} + +func (p *parser) document() *node { + n := p.node(documentNode) + n.anchors = make(map[string]*node) + p.doc = n + p.expect(yaml_DOCUMENT_START_EVENT) + n.children = append(n.children, p.parse()) + p.expect(yaml_DOCUMENT_END_EVENT) + return n +} + +func (p *parser) alias() *node { + n := p.node(aliasNode) + n.value = string(p.event.anchor) + n.alias = p.doc.anchors[n.value] + if n.alias == nil { + failf("unknown anchor '%s' referenced", n.value) + } + p.expect(yaml_ALIAS_EVENT) + return n +} + +func (p *parser) scalar() *node { + n := p.node(scalarNode) + n.value = string(p.event.value) + n.tag = string(p.event.tag) + n.implicit = p.event.implicit + p.anchor(n, p.event.anchor) + p.expect(yaml_SCALAR_EVENT) + return n +} + +func (p *parser) sequence() *node { + n := p.node(sequenceNode) + p.anchor(n, p.event.anchor) + p.expect(yaml_SEQUENCE_START_EVENT) + for p.peek() != yaml_SEQUENCE_END_EVENT { + n.children = append(n.children, p.parse()) + } + p.expect(yaml_SEQUENCE_END_EVENT) + return n +} + +func (p *parser) mapping() *node { + n := p.node(mappingNode) + p.anchor(n, p.event.anchor) + p.expect(yaml_MAPPING_START_EVENT) + for p.peek() != yaml_MAPPING_END_EVENT { + n.children = append(n.children, p.parse(), p.parse()) + } + p.expect(yaml_MAPPING_END_EVENT) + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. + +type decoder struct { + doc *node + aliases map[*node]bool + mapType reflect.Type + terrors []string + strict bool + + decodeCount int + aliasCount int + aliasDepth int +} + +var ( + mapItemType = reflect.TypeOf(MapItem{}) + durationType = reflect.TypeOf(time.Duration(0)) + defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = defaultMapType.Elem() + timeType = reflect.TypeOf(time.Time{}) + ptrTimeType = reflect.TypeOf(&time.Time{}) +) + +func newDecoder(strict bool) *decoder { + d := &decoder{mapType: defaultMapType, strict: strict} + d.aliases = make(map[*node]bool) + return d +} + +func (d *decoder) terror(n *node, tag string, out reflect.Value) { + if n.tag != "" { + tag = n.tag + } + value := n.value + if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. 
+// +// If n holds a null value, prepare returns before doing anything. +func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + if u, ok := out.Addr().Interface().(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +const ( + // 400,000 decode operations is ~500kb of dense object declarations, or + // ~5kb of dense object declarations with 10000% alias expansion + alias_ratio_range_low = 400000 + + // 4,000,000 decode operations is ~5MB of dense object declarations, or + // ~4.5MB of dense object declarations with 10% alias expansion + alias_ratio_range_high = 4000000 + + // alias_ratio_range is the range over which we scale allowed alias ratios + alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) +) + +func allowedAliasRatio(decodeCount int) float64 { + switch { + case decodeCount <= alias_ratio_range_low: + // allow 99% to come from alias expansion for small-to-medium documents + return 0.99 + case decodeCount >= alias_ratio_range_high: + // allow 10% to come from alias expansion for very large documents + return 0.10 + default: + // scale smoothly from 99% down to 10% over the range. + // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. + // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). + return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) + } +} + +func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { + d.decodeCount++ + if d.aliasDepth > 0 { + d.aliasCount++ + } + if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { + failf("document contains excessive aliasing") + } + switch n.kind { + case documentNode: + return d.document(n, out) + case aliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.kind { + case scalarNode: + good = d.scalar(n, out) + case mappingNode: + good = d.mapping(n, out) + case sequenceNode: + good = d.sequence(n, out) + default: + panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) + } + return good +} + +func (d *decoder) document(n *node, out reflect.Value) (good bool) { + if len(n.children) == 1 { + d.doc = n + d.unmarshal(n.children[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *node, out reflect.Value) (good bool) { + if d.aliases[n] { + // TODO this could actually be allowed in some circumstances. 
+ failf("anchor '%s' value contains itself", n.value) + } + d.aliases[n] = true + d.aliasDepth++ + good = d.unmarshal(n.alias, out) + d.aliasDepth-- + delete(d.aliases, n) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) scalar(n *node, out reflect.Value) bool { + var tag string + var resolved interface{} + if n.tag == "" && !n.implicit { + tag = yaml_STR_TAG + resolved = n.value + } else { + tag, resolved = resolve(n.tag, n.value) + if tag == yaml_BINARY_TAG { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + if out.Kind() == reflect.Map && !out.CanAddr() { + resetMap(out) + } else { + out.Set(reflect.Zero(out.Type())) + } + return true + } + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + // We've resolved to exactly the type we want, so use that. + out.Set(resolvedv) + return true + } + // Perhaps we can use the value as a TextUnmarshaler to + // set its value. + if out.CanAddr() { + u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) + if ok { + var text []byte + if tag == yaml_BINARY_TAG { + text = []byte(resolved.(string)) + } else { + // We let any value be unmarshaled into TextUnmarshaler. + // That might be more lax than we'd like, but the + // TextUnmarshaler itself should bowl out any dubious values. + text = []byte(n.value) + } + err := u.UnmarshalText(text) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == yaml_BINARY_TAG { + out.SetString(resolved.(string)) + return true + } + if resolved != nil { + out.SetString(n.value) + return true + } + case reflect.Interface: + if resolved == nil { + out.Set(reflect.Zero(out.Type())) + } else if tag == yaml_TIMESTAMP_TAG { + // It looks like a timestamp but for backward compatibility + // reasons we set it as a string, so that code that unmarshals + // timestamp-like values into interface{} will continue to + // see a string and not a time.Time. + // TODO(v3) Drop this. 
+ out.Set(reflect.ValueOf(n.value)) + } else { + out.Set(reflect.ValueOf(resolved)) + } + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch resolved := resolved.(type) { + case int: + if !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case int64: + if !out.OverflowInt(resolved) { + out.SetInt(resolved) + return true + } + case uint64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case float64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + return true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + return true + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + return true + case int64: + out.SetFloat(float64(resolved)) + return true + case uint64: + out.SetFloat(float64(resolved)) + return true + case float64: + out.SetFloat(resolved) + return true + } + case reflect.Struct: + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + out.Set(resolvedv) + return true + } + case reflect.Ptr: + if out.Type().Elem() == reflect.TypeOf(resolved) { + // TODO DOes this make sense? When is out a Ptr except when decoding a nil value? + elem := reflect.New(out.Type().Elem()) + elem.Elem().Set(reflect.ValueOf(resolved)) + out.Set(elem) + return true + } + } + d.terror(n, tag, out) + return false +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { + l := len(n.children) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Array: + if l != out.Len() { + failf("invalid array: want %d elements but got %d", out.Len(), l) + } + case reflect.Interface: + // No type hints. Will have to use a generic sequence. 
+ iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, yaml_SEQ_TAG, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.children[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Slice: + return d.mappingSlice(n, out) + case reflect.Map: + // okay + case reflect.Interface: + if d.mapType.Kind() == reflect.Map { + iface := out + out = reflect.MakeMap(d.mapType) + iface.Set(out) + } else { + slicev := reflect.New(d.mapType).Elem() + if !d.mappingSlice(n, slicev) { + return false + } + out.Set(slicev) + return true + } + default: + d.terror(n, yaml_MAP_TAG, out) + return false + } + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + mapType := d.mapType + if outt.Key() == ifaceType && outt.Elem() == ifaceType { + d.mapType = outt + } + + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + } + l := len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.children[i], k) { + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.children[i+1], e) { + d.setMapIndex(n.children[i+1], out, k, e) + } + } + } + d.mapType = mapType + return true +} + +func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) { + if d.strict && out.MapIndex(k) != zeroValue { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface())) + return + } + out.SetMapIndex(k, v) +} + +func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { + outt := out.Type() + if outt.Elem() != mapItemType { + d.terror(n, yaml_MAP_TAG, out) + return false + } + + mapType := d.mapType + d.mapType = outt + + var slice []MapItem + var l = len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + item := MapItem{} + k := reflect.ValueOf(&item.Key).Elem() + if d.unmarshal(n.children[i], k) { + v := reflect.ValueOf(&item.Value).Elem() + if d.unmarshal(n.children[i+1], v) { + slice = append(slice, item) + } + } + } + out.Set(reflect.ValueOf(slice)) + d.mapType = mapType + return true +} + +func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + name := settableValueOf("") + l := len(n.children) + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) + elemType = inlineMap.Type().Elem() + } + + var doneFields []bool + if d.strict { + doneFields = make([]bool, len(sinfo.FieldsList)) + } + for i := 0; i < l; i += 2 { + ni := n.children[i] + if isMerge(ni) { + d.merge(n.children[i+1], out) + continue + } + if !d.unmarshal(ni, name) { + continue + } + if info, ok := sinfo.FieldsMap[name.String()]; ok { + if d.strict { + if doneFields[info.Id] { + d.terrors = append(d.terrors, 
fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type())) + continue + } + doneFields[info.Id] = true + } + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = out.FieldByIndex(info.Inline) + } + d.unmarshal(n.children[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.children[i+1], value) + d.setMapIndex(n.children[i+1], inlineMap, name, value) + } else if d.strict { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type())) + } + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) merge(n *node, out reflect.Value) { + switch n.kind { + case mappingNode: + d.unmarshal(n, out) + case aliasNode: + if n.alias != nil && n.alias.kind != mappingNode { + failWantMap() + } + d.unmarshal(n, out) + case sequenceNode: + // Step backwards as earlier nodes take precedence. + for i := len(n.children) - 1; i >= 0; i-- { + ni := n.children[i] + if ni.kind == aliasNode { + if ni.alias != nil && ni.alias.kind != mappingNode { + failWantMap() + } + } else if ni.kind != mappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } +} + +func isMerge(n *node) bool { + return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go new file mode 100644 index 0000000000..a1c2cc5262 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go @@ -0,0 +1,1685 @@ +package yaml + +import ( + "bytes" + "fmt" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + emitter.column = 0 + emitter.line++ + return true +} + +// Copy a character from a string into buffer. 
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. +func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + emitter.column = 0 + emitter.line++ + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. +// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? 
+ tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + emitter.indent += emitter.best_indent + } + return true +} + +// State dispatcher. +func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. 
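The state dispatcher above drives all emitter output; callers normally reach it only through Marshal. A small illustrative sketch (not part of the diff), assuming the upstream gopkg.in/yaml.v2 package and made-up data.

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v2"
)

func main() {
	doc := map[string]interface{}{
		"name":  "cilium",
		"ports": []int{80, 443},
	}
	out, err := yaml.Marshal(doc)
	if err != nil {
		log.Fatal(err)
	}
	// Default output uses block style, e.g.:
	//   name: cilium
	//   ports:
	//   - 80
	//   - 443
	fmt.Print(string(out))
}
```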
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. +func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if 
!yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + return yaml_emitter_emit_node(emitter, event, true, false, false, false) +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. +func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a flow key node. 
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. +func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block item node. +func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { + return false + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a block key node. 
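The flow-item states above produce the bracketed `[...]` and `{...}` forms. From the public API they are most easily requested with the `,flow` struct-tag option; a sketch under the same gopkg.in/yaml.v2 assumption, with an illustrative `Policy` type.

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v2"
)

// Policy is a made-up type; the ",flow" tag asks for flow-style output.
type Policy struct {
	Name  string `yaml:"name"`
	Ports []int  `yaml:"ports,flow"`
}

func main() {
	out, err := yaml.Marshal(Policy{Name: "allow-http", Ports: []int{80, 8080}})
	if err != nil {
		log.Fatal(err)
	}
	// Expected shape, e.g.:
	//   name: allow-http
	//   ports: [80, 8080]
	fmt.Print(string(out))
}
```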
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a node. +func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) + } +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. 
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. +func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. +func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. 
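The empty-collection look-aheads above are what let the emitter fall back to flow style for `[]` and `{}` while keeping block style for non-empty collections. A brief sketch, again assuming gopkg.in/yaml.v2 and made-up keys; the exact output is indicative only.

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v2"
)

func main() {
	doc := map[string]interface{}{
		"selectors": []string{},
		"rules":     []string{"allow-dns"},
	}
	out, err := yaml.Marshal(doc)
	if err != nil {
		log.Fatal(err)
	}
	// Empty sequences are emitted in flow style, non-empty ones in block style, e.g.:
	//   rules:
	//   - allow-dns
	//   selectors: []
	fmt.Print(string(out))
}
```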
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an anchor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. +func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. 
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. +func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. 
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. 
+ preceded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. +func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. 
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + emitter.indention = true + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = 
false + breaks = false + } + } + + emitter.whitespace = false + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if 
!put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go new file mode 100644 index 0000000000..0ee738e11b --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go @@ -0,0 +1,390 @@ +package yaml + +import ( + "encoding" 
+ "fmt" + "io" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// jsonNumber is the interface of the encoding/json.Number datatype. +// Repeating the interface here avoids a dependency on encoding/json, and also +// supports other libraries like jsoniter, which use a similar datatype with +// the same interface. Detecting this interface is useful when dealing with +// structures containing json.Number, which is a string under the hood. The +// encoder should prefer the use of Int64(), Float64() and string(), in that +// order, when encoding this type. +type jsonNumber interface { + Float64() (float64, error) + Int64() (int64, error) + String() string +} + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool + // doneInit holds whether the initial stream_start_event has been + // emitted. + doneInit bool +} + +func newEncoder() *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func newEncoderWithWriter(w io.Writer) *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_writer(&e.emitter, w) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func (e *encoder) init() { + if e.doneInit { + return + } + yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) + e.emit() + e.doneInit = true +} + +func (e *encoder) finish() { + e.emitter.open_ended = false + yaml_stream_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. + e.must(yaml_emitter_emit(&e.emitter, &e.event)) +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshalDoc(tag string, in reflect.Value) { + e.init() + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.emit() + e.marshal(tag, in) + yaml_document_end_event_initialize(&e.event, true) + e.emit() +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { + e.nilv() + return + } + iface := in.Interface() + switch m := iface.(type) { + case jsonNumber: + integer, err := m.Int64() + if err == nil { + // In this case the json.Number is a valid int64 + in = reflect.ValueOf(integer) + break + } + float, err := m.Float64() + if err == nil { + // In this case the json.Number is a valid float64 + in = reflect.ValueOf(float) + break + } + // fallback case - no number could be obtained + in = reflect.ValueOf(m.String()) + case time.Time, *time.Time: + // Although time.Time implements TextMarshaler, + // we don't want to treat it as a string for YAML + // purposes because YAML has special support for + // timestamps. 
+ case Marshaler: + v, err := m.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + in = reflect.ValueOf(v) + case encoding.TextMarshaler: + text, err := m.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + case nil: + e.nilv() + return + } + switch in.Kind() { + case reflect.Interface: + e.marshal(tag, in.Elem()) + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + if in.Type() == ptrTimeType { + e.timev(tag, in.Elem()) + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Struct: + if in.Type() == timeType { + e.timev(tag, in) + } else { + e.structv(tag, in) + } + case reflect.Slice, reflect.Array: + if in.Type().Elem() == mapItemType { + e.itemsv(tag, in) + } else { + e.slicev(tag, in) + } + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if in.Type() == durationType { + e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) + } else { + e.intv(tag, in) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) itemsv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) + for _, item := range slice { + e.marshal("", reflect.ValueOf(item.Key)) + e.marshal("", reflect.ValueOf(item.Value)) + } + }) +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = in.FieldByIndex(info.Inline) + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) + e.emit() + f() + yaml_mapping_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) slicev(tag string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} + +// isBase60 
returns whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshalled quoted for +// the time being for compatibility with other parsers. +func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + canUsePlain := true + switch { + case !utf8.ValidString(s): + if tag == yaml_BINARY_TAG { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if tag != "" { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. + tag = yaml_BINARY_TAG + s = encodeBase64(s) + case tag == "": + // Check to see if it would resolve to a specific + // tag when encoded unquoted. If it doesn't, + // there's no need to quote it. + rtag, _ := resolve("", s) + canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s) + } + // Note: it's possible for user code to emit invalid YAML + // if they explicitly specify a tag and a string containing + // text that's incompatible with that tag. + switch { + case strings.Contains(s, "\n"): + style = yaml_LITERAL_SCALAR_STYLE + case canUsePlain: + style = yaml_PLAIN_SCALAR_STYLE + default: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) timev(tag string, in reflect.Value) { + t := in.Interface().(time.Time) + s := t.Format(time.RFC3339Nano) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // Issue #352: When formatting, use the precision of the underlying value + precision := 64 + if in.Kind() == reflect.Float32 { + precision = 32 + } + + s := strconv.FormatFloat(in.Float(), 'g', -1, precision) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { + implicit := tag == "" + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.emit() +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go new file mode 100644 index 0000000000..81d05dfe57 --- /dev/null 
+++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go @@ -0,0 +1,1095 @@ +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + return &parser.tokens[parser.tokens_head] + } + return nil +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. 
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END +// ************ +func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_STREAM_START_TOKEN { + return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark) + } + parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + encoding: token.encoding, + } + skip_token(parser) + return true +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// * +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// ************************* +func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { + + token := peek_token(parser) + if token == nil { + return false + } + + // Parse extra document end indicators. + if !implicit { + for token.typ == yaml_DOCUMENT_END_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && + token.typ != yaml_TAG_DIRECTIVE_TOKEN && + token.typ != yaml_DOCUMENT_START_TOKEN && + token.typ != yaml_STREAM_END_TOKEN { + // Parse an implicit document. + if !yaml_parser_process_directives(parser, nil, nil) { + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_BLOCK_NODE_STATE + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + } else if token.typ != yaml_STREAM_END_TOKEN { + // Parse an explicit document. + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + start_mark := token.start_mark + if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { + return false + } + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_DOCUMENT_START_TOKEN { + yaml_parser_set_parser_error(parser, + "did not find expected <document start>", token.start_mark) + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE + end_mark := token.end_mark + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: false, + } + skip_token(parser) + + } else { + // Parse the stream end. + parser.state = yaml_PARSE_END_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + } + + return true +} + +// Parse the productions: +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? 
DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + return true +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// 
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? 
+// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? +// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. 
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. +func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go new file mode 100644 index 0000000000..7c1f5fac3d --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go @@ -0,0 +1,412 @@ +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. +func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. 
+const ( + bom_UTF8 = "\xef\xbb\xbf" + bom_UTF16LE = "\xff\xfe" + bom_UTF16BE = "\xfe\xff" +) + +// Determine the input stream encoding by checking the BOM symbol. If no BOM is +// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. +func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { + // Ensure that we had enough bytes in the raw buffer. + for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { + if !yaml_parser_update_raw_buffer(parser) { + return false + } + } + + // Determine the encoding. + buf := parser.raw_buffer + pos := parser.raw_buffer_pos + avail := len(buf) - pos + if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { + parser.encoding = yaml_UTF16LE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { + parser.encoding = yaml_UTF16BE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { + parser.encoding = yaml_UTF8_ENCODING + parser.raw_buffer_pos += 3 + parser.offset += 3 + } else { + parser.encoding = yaml_UTF8_ENCODING + } + return true +} + +// Update the raw buffer. +func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { + size_read := 0 + + // Return if the raw buffer is full. + if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { + return true + } + + // Return on EOF. + if parser.eof { + return true + } + + // Move the remaining bytes in the raw buffer to the beginning. + if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { + copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) + } + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] + parser.raw_buffer_pos = 0 + + // Call the read handler to fill the buffer. + size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] + if err == io.EOF { + parser.eof = true + } else if err != nil { + return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) + } + return true +} + +// Ensure that the buffer contains at least `length` characters. +// Return true on success, false on failure. +// +// The length is supposed to be significantly less that the buffer size. +func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // [Go] This function was changed to guarantee the requested length size at EOF. + // The fact we need to do this is pretty awful, but the description above implies + // for that to be the case, and there are tests + + // If the EOF flag is set and the raw buffer is empty, do nothing. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + // [Go] ACTUALLY! Read the documentation of this function above. + // This is just broken. To return true, we need to have the + // given length in the buffer. Not doing that means every single + // check that calls this function to make sure the buffer has a + // given length is Go) panicking; or C) accessing invalid memory. + //return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return true + } + + // Determine the input encoding if it is not known yet. 
+ if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. + buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. + parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. 
+ switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. 
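(The RFC 2781 surrogate-pair formula quoted in the comment above can be checked in isolation. A small sketch with example code points chosen by the reviewer, cross-checked against the standard library's unicode/utf16 decoder:)

package main

import (
	"fmt"
	"unicode/utf16"
)

func main() {
	// Example high/low surrogate pair for U+1F600.
	w1, w2 := rune(0xD83D), rune(0xDE00)

	// Manual decoding, exactly as the reader does it:
	// U = 0x10000 + ((W1 & 0x3FF) << 10) + (W2 & 0x3FF)
	u := 0x10000 + ((w1 & 0x3FF) << 10) + (w2 & 0x3FF)

	fmt.Printf("manual: %#x  stdlib: %#x\n", u, utf16.DecodeRune(w1, w2))
	// Both print 0x1f600.
}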
+ parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + // [Go] Read the documentation of this function above. To return true, + // we need to have the given length in the buffer. Not doing that means + // every single check that calls this function to make sure the buffer + // has a given length is Go) panicking; or C) accessing invalid memory. + // This happens here due to the EOF above breaking early. + for buffer_len < length { + parser.buffer[buffer_len] = 0 + buffer_len++ + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go new file mode 100644 index 0000000000..4120e0c916 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go @@ -0,0 +1,258 @@ +package yaml + +import ( + "encoding/base64" + "math" + "regexp" + "strconv" + "strings" + "time" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' 
// Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, + {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, + {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, + {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, + {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, + {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, + {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", yaml_MERGE_TAG, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + // TODO This can easily be made faster and produce less garbage. + if strings.HasPrefix(tag, longTagPrefix) { + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG: + return true + } + return false +} + +var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) + +func resolve(tag string, in string) (rtag string, out interface{}) { + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: + return + case yaml_FLOAT_TAG: + if rtag == yaml_INT_TAG { + switch v := out.(type) { + case int64: + rtag = yaml_FLOAT_TAG + out = float64(v) + return + case int: + rtag = yaml_FLOAT_TAG + out = float64(v) + return + } + } + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { + // Handle things we can lookup in a map. + if item, ok := resolveMap[in]; ok { + return item.tag, item.value + } + + // Base 60 floats are a bad idea, were dropped in YAML 1.2, and + // are purposefully unsupported here. They're still quoted on + // the way out for compatibility with other parser, though. + + switch hint { + case 'M': + // We've already checked the map above. + + case '.': + // Not in the map, so maybe a normal float. + floatv, err := strconv.ParseFloat(in, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + + case 'D', 'S': + // Int, float, or timestamp. + // Only try values as a timestamp if the value is unquoted or there's an explicit + // !!timestamp tag. 
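(The resolve table and resolveMap above implement YAML 1.1 implicit typing, where unquoted scalars such as "yes", "off" or "~" become booleans and null, and everything else falls back to tag:yaml.org,2002:str. A self-contained sketch of that first lookup step, using a toy map rather than the vendored resolveMap:)

package main

import "fmt"

// toyResolve mimics the exact-match step of resolve(): plain scalars that
// YAML 1.1 treats as booleans or null. Unmatched strings stay strings.
var toyResolve = map[string]interface{}{
	"y": true, "yes": true, "true": true, "on": true,
	"n": false, "no": false, "false": false, "off": false,
	"": nil, "~": nil, "null": nil,
}

func main() {
	for _, s := range []string{"yes", "off", "~", "plain text"} {
		if v, ok := toyResolve[s]; ok {
			fmt.Printf("%-12q -> %v\n", s, v)
			continue
		}
		fmt.Printf("%-12q -> string (tag:yaml.org,2002:str)\n", s)
	}
}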
+ if tag == "" || tag == yaml_TIMESTAMP_TAG { + t, ok := parseTimestamp(in) + if ok { + return yaml_TIMESTAMP_TAG, t + } + } + + plain := strings.Replace(in, "_", "", -1) + intv, err := strconv.ParseInt(plain, 0, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain, 0, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + if yamlStyleFloat.MatchString(plain) { + floatv, err := strconv.ParseFloat(plain, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + } + if strings.HasPrefix(plain, "0b") { + intv, err := strconv.ParseInt(plain[2:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 2, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + } else if strings.HasPrefix(plain, "-0b") { + intv, err := strconv.ParseInt("-" + plain[3:], 2, 64) + if err == nil { + if true || intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + } + default: + panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") + } + } + return yaml_STR_TAG, in +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. +func encodeBase64(s string) string { + const lineLen = 70 + encLen := base64.StdEncoding.EncodedLen(len(s)) + lines := encLen/lineLen + 1 + buf := make([]byte, encLen*2+lines) + in := buf[0:encLen] + out := buf[encLen:] + base64.StdEncoding.Encode(in, []byte(s)) + k := 0 + for i := 0; i < len(in); i += lineLen { + j := i + lineLen + if j > len(in) { + j = len(in) + } + k += copy(out[k:], in[i:j]) + if lines > 1 { + out[k] = '\n' + k++ + } + } + return string(out[:k]) +} + +// This is a subset of the formats allowed by the regular expression +// defined at http://yaml.org/type/timestamp.html. +var allowedTimestampFormats = []string{ + "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. + "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". + "2006-1-2 15:4:5.999999999", // space separated with no time zone + "2006-1-2", // date only + // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" + // from the set of examples. +} + +// parseTimestamp parses s as a timestamp string and +// returns the timestamp and reports whether it succeeded. +// Timestamp formats are defined at http://yaml.org/type/timestamp.html +func parseTimestamp(s string) (time.Time, bool) { + // TODO write code to check all the formats supported by + // http://yaml.org/type/timestamp.html instead of using time.Parse. + + // Quick check: all date formats start with YYYY-. 
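(allowedTimestampFormats above feeds parseTimestamp through time.Parse. A quick sketch of how those layouts behave on typical YAML timestamps; the input strings are the reviewer's examples, not test data from the library:)

package main

import (
	"fmt"
	"time"
)

// Same layout list as allowedTimestampFormats above.
var layouts = []string{
	"2006-1-2T15:4:5.999999999Z07:00",
	"2006-1-2t15:4:5.999999999Z07:00",
	"2006-1-2 15:4:5.999999999",
	"2006-1-2",
}

// parse tries each layout in order, as parseTimestamp does.
func parse(s string) (time.Time, bool) {
	for _, l := range layouts {
		if t, err := time.Parse(l, s); err == nil {
			return t, true
		}
	}
	return time.Time{}, false
}

func main() {
	for _, s := range []string{"2001-12-15T02:59:43.1Z", "2002-12-14", "not a date"} {
		t, ok := parse(s)
		fmt.Println(s, "->", ok, t.UTC())
	}
}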
+ i := 0 + for ; i < len(s); i++ { + if c := s[i]; c < '0' || c > '9' { + break + } + } + if i != 4 || i == len(s) || s[i] != '-' { + return time.Time{}, false + } + for _, format := range allowedTimestampFormats { + if t, err := time.Parse(format, s); err == nil { + return t, true + } + } + return time.Time{}, false +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go new file mode 100644 index 0000000000..0b9bb6030a --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go @@ -0,0 +1,2711 @@ +package yaml + +import ( + "bytes" + "fmt" +) + +// Introduction +// ************ +// +// The following notes assume that you are familiar with the YAML specification +// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in +// some cases we are less restrictive that it requires. +// +// The process of transforming a YAML stream into a sequence of events is +// divided on two steps: Scanning and Parsing. +// +// The Scanner transforms the input stream into a sequence of tokens, while the +// parser transform the sequence of tokens produced by the Scanner into a +// sequence of parsing events. +// +// The Scanner is rather clever and complicated. The Parser, on the contrary, +// is a straightforward implementation of a recursive-descendant parser (or, +// LL(1) parser, as it is usually called). +// +// Actually there are two issues of Scanning that might be called "clever", the +// rest is quite straightforward. The issues are "block collection start" and +// "simple keys". Both issues are explained below in details. +// +// Here the Scanning step is explained and implemented. We start with the list +// of all the tokens produced by the Scanner together with short descriptions. +// +// Now, tokens: +// +// STREAM-START(encoding) # The stream start. +// STREAM-END # The stream end. +// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. +// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. +// DOCUMENT-START # '---' +// DOCUMENT-END # '...' +// BLOCK-SEQUENCE-START # Indentation increase denoting a block +// BLOCK-MAPPING-START # sequence or a block mapping. +// BLOCK-END # Indentation decrease. +// FLOW-SEQUENCE-START # '[' +// FLOW-SEQUENCE-END # ']' +// BLOCK-SEQUENCE-START # '{' +// BLOCK-SEQUENCE-END # '}' +// BLOCK-ENTRY # '-' +// FLOW-ENTRY # ',' +// KEY # '?' or nothing (simple keys). +// VALUE # ':' +// ALIAS(anchor) # '*anchor' +// ANCHOR(anchor) # '&anchor' +// TAG(handle,suffix) # '!handle!suffix' +// SCALAR(value,style) # A scalar. +// +// The following two tokens are "virtual" tokens denoting the beginning and the +// end of the stream: +// +// STREAM-START(encoding) +// STREAM-END +// +// We pass the information about the input stream encoding with the +// STREAM-START token. +// +// The next two tokens are responsible for tags: +// +// VERSION-DIRECTIVE(major,minor) +// TAG-DIRECTIVE(handle,prefix) +// +// Example: +// +// %YAML 1.1 +// %TAG ! !foo +// %TAG !yaml! tag:yaml.org,2002: +// --- +// +// The correspoding sequence of tokens: +// +// STREAM-START(utf-8) +// VERSION-DIRECTIVE(1,1) +// TAG-DIRECTIVE("!","!foo") +// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") +// DOCUMENT-START +// STREAM-END +// +// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole +// line. +// +// The document start and end indicators are represented by: +// +// DOCUMENT-START +// DOCUMENT-END +// +// Note that if a YAML stream contains an implicit document (without '---' +// and '...' 
indicators), no DOCUMENT-START and DOCUMENT-END tokens will be +// produced. +// +// In the following examples, we present whole documents together with the +// produced tokens. +// +// 1. An implicit document: +// +// 'a scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// STREAM-END +// +// 2. An explicit document: +// +// --- +// 'a scalar' +// ... +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// SCALAR("a scalar",single-quoted) +// DOCUMENT-END +// STREAM-END +// +// 3. Several documents in a stream: +// +// 'a scalar' +// --- +// 'another scalar' +// --- +// 'yet another scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// DOCUMENT-START +// SCALAR("another scalar",single-quoted) +// DOCUMENT-START +// SCALAR("yet another scalar",single-quoted) +// STREAM-END +// +// We have already introduced the SCALAR token above. The following tokens are +// used to describe aliases, anchors, tag, and scalars: +// +// ALIAS(anchor) +// ANCHOR(anchor) +// TAG(handle,suffix) +// SCALAR(value,style) +// +// The following series of examples illustrate the usage of these tokens: +// +// 1. A recursive sequence: +// +// &A [ *A ] +// +// Tokens: +// +// STREAM-START(utf-8) +// ANCHOR("A") +// FLOW-SEQUENCE-START +// ALIAS("A") +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A tagged scalar: +// +// !!float "3.14" # A good approximation. +// +// Tokens: +// +// STREAM-START(utf-8) +// TAG("!!","float") +// SCALAR("3.14",double-quoted) +// STREAM-END +// +// 3. Various scalar styles: +// +// --- # Implicit empty plain scalars do not produce tokens. +// --- a plain scalar +// --- 'a single-quoted scalar' +// --- "a double-quoted scalar" +// --- |- +// a literal scalar +// --- >- +// a folded +// scalar +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// DOCUMENT-START +// SCALAR("a plain scalar",plain) +// DOCUMENT-START +// SCALAR("a single-quoted scalar",single-quoted) +// DOCUMENT-START +// SCALAR("a double-quoted scalar",double-quoted) +// DOCUMENT-START +// SCALAR("a literal scalar",literal) +// DOCUMENT-START +// SCALAR("a folded scalar",folded) +// STREAM-END +// +// Now it's time to review collection-related tokens. We will start with +// flow collections: +// +// FLOW-SEQUENCE-START +// FLOW-SEQUENCE-END +// FLOW-MAPPING-START +// FLOW-MAPPING-END +// FLOW-ENTRY +// KEY +// VALUE +// +// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and +// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' +// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the +// indicators '?' and ':', which are used for denoting mapping keys and values, +// are represented by the KEY and VALUE tokens. +// +// The following examples show flow collections: +// +// 1. A flow sequence: +// +// [item 1, item 2, item 3] +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-SEQUENCE-START +// SCALAR("item 1",plain) +// FLOW-ENTRY +// SCALAR("item 2",plain) +// FLOW-ENTRY +// SCALAR("item 3",plain) +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A flow mapping: +// +// { +// a simple key: a value, # Note that the KEY token is produced. +// ? 
a complex key: another value, +// } +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// FLOW-ENTRY +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// FLOW-ENTRY +// FLOW-MAPPING-END +// STREAM-END +// +// A simple key is a key which is not denoted by the '?' indicator. Note that +// the Scanner still produce the KEY token whenever it encounters a simple key. +// +// For scanning block collections, the following tokens are used (note that we +// repeat KEY and VALUE here): +// +// BLOCK-SEQUENCE-START +// BLOCK-MAPPING-START +// BLOCK-END +// BLOCK-ENTRY +// KEY +// VALUE +// +// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation +// increase that precedes a block collection (cf. the INDENT token in Python). +// The token BLOCK-END denote indentation decrease that ends a block collection +// (cf. the DEDENT token in Python). However YAML has some syntax pecularities +// that makes detections of these tokens more complex. +// +// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators +// '-', '?', and ':' correspondingly. +// +// The following examples show how the tokens BLOCK-SEQUENCE-START, +// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: +// +// 1. Block sequences: +// +// - item 1 +// - item 2 +// - +// - item 3.1 +// - item 3.2 +// - +// key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 3.1",plain) +// BLOCK-ENTRY +// SCALAR("item 3.2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Block mappings: +// +// a simple key: a value # The KEY token is produced here. +// ? a complex key +// : another value +// a mapping: +// key 1: value 1 +// key 2: value 2 +// a sequence: +// - item 1 +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// KEY +// SCALAR("a mapping",plain) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML does not always require to start a new block collection from a new +// line. If the current line contains only '-', '?', and ':' indicators, a new +// block collection may start at the current line. The following examples +// illustrate this case: +// +// 1. Collections in a sequence: +// +// - - item 1 +// - item 2 +// - key 1: value 1 +// key 2: value 2 +// - ? 
complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . 
LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. + *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. +func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + if parser.tokens_head != len(parser.tokens) { + // If queue is non-empty, check if any potential simple key may + // occupy the head position. + head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] + if !ok { + break + } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok { + return false + } else if !valid { + break + } + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + // Eat whitespaces and comments until we reach the next token. 
+ if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? + if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? 
+ if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) { + if !simple_key.possible { + return false, true + } + + // The 1.2 specification says: + // + // "If the ? indicator is omitted, parsing needs to see past the + // implicit key to recognize it as such. To limit the amount of + // lookahead required, the “:” indicator must appear at most 1024 + // Unicode characters beyond the start of the key. In addition, the key + // is restricted to a single line." + // + if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index { + // Check if the potential simple key to be removed is required. + if simple_key.required { + return false, yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + return false, true + } + return true, true +} + +// Check if a simple key may start at the current position and add it if +// needed. 
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { + // A simple key is required at the current position if the scanner is in + // the block context and the current column coincides with the indentation + // level. + + required := parser.flow_level == 0 && parser.indent == parser.mark.column + + // + // If the current position may start a simple key, save it. + // + if parser.simple_key_allowed { + simple_key := yaml_simple_key_t{ + possible: true, + required: required, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + mark: parser.mark, + } + + if !yaml_parser_remove_simple_key(parser) { + return false + } + parser.simple_keys[len(parser.simple_keys)-1] = simple_key + parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1 + } + return true +} + +// Remove a potential simple key at the current flow level. +func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { + i := len(parser.simple_keys) - 1 + if parser.simple_keys[i].possible { + // If the key is required, it is an error. + if parser.simple_keys[i].required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", parser.simple_keys[i].mark, + "could not find expected ':'") + } + // Remove the key from the stack. + parser.simple_keys[i].possible = false + delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number) + } + return true +} + +// max_flow_level limits the flow_level +const max_flow_level = 10000 + +// Increase the flow level and resize the simple key list if needed. +func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { + // Reset the simple key on the next level. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{ + possible: false, + required: false, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + mark: parser.mark, + }) + + // Increase the flow level. + parser.flow_level++ + if parser.flow_level > max_flow_level { + return yaml_parser_set_scanner_error(parser, + "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_flow_level)) + } + return true +} + +// Decrease the flow level. +func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { + if parser.flow_level > 0 { + parser.flow_level-- + last := len(parser.simple_keys) - 1 + delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number) + parser.simple_keys = parser.simple_keys[:last] + } + return true +} + +// max_indents limits the indents stack size +const max_indents = 10000 + +// Push the current indentation level to the stack and set the new level +// the current column is greater than the indentation level. In this case, +// append or insert the specified token into the token queue. +func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + if parser.indent < column { + // Push the current indentation level to the stack and set the new + // indentation level. + parser.indents = append(parser.indents, parser.indent) + parser.indent = column + if len(parser.indents) > max_indents { + return yaml_parser_set_scanner_error(parser, + "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_indents)) + } + + // Create a token and insert it into the queue. 
+ token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + // Loop through the indentation levels in the stack. + for parser.indent > column { + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + parser.simple_keys_by_tok = make(map[int]int) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. 
+ start_mark := parser.mark + + skip(parser) + skip(parser) + skip(parser) + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // The indicators '[' and '{' may start a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // Increase the flow level. + if !yaml_parser_increase_flow_level(parser) { + return false + } + + // A simple key may follow the indicators '[' and '{'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. +func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset any potential simple key on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Decrease the flow level. + if !yaml_parser_decrease_flow_level(parser) { + return false + } + + // No simple keys after the indicators ']' and '}'. + parser.simple_key_allowed = false + + // Consume the token. + + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-ENTRY token. +func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after ','. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_FLOW_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the BLOCK-ENTRY token. +func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { + // Check if the scanner is in the block context. + if parser.flow_level == 0 { + // Check if we are allowed to start a new entry. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "block sequence entries are not allowed in this context") + } + // Add the BLOCK-SEQUENCE-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { + return false + } + } else { + // It is an error for the '-' indicator to occur in the flow context, + // but we let the Parser detect and report about it because the Parser + // is able to point to the context. + } + + // Reset any potential simple keys on the current flow level. 
+ if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '-'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the BLOCK-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the KEY token. +func yaml_parser_fetch_key(parser *yaml_parser_t) bool { + + // In the block context, additional checks are required. + if parser.flow_level == 0 { + // Check if we are allowed to start a new key (not nessesary simple). + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping keys are not allowed in this context") + } + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '?' in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the KEY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the VALUE token. +func yaml_parser_fetch_value(parser *yaml_parser_t) bool { + + simple_key := &parser.simple_keys[len(parser.simple_keys)-1] + + // Have we found a simple key? + if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok { + return false + + } else if valid { + + // Create the KEY token and insert it into the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. + if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + delete(parser.simple_keys_by_tok, simple_key.token_number) + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. 
+ token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. +func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? + if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. 
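(The %YAML directive scanner above accepts two dot-separated version components of at most max_number_length (2) digits each. A minimal standalone sketch of the same rule applied to a raw directive line; parseVersion is a hypothetical helper, not part of the vendored scanner:)

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseVersion applies the scanner's constraints to a "%YAML x.y" line:
// two components separated by '.', each 1-2 digits.
func parseVersion(directive string) (major, minor int8, err error) {
	val := strings.TrimSpace(strings.TrimPrefix(directive, "%YAML"))
	parts := strings.SplitN(val, ".", 2)
	if len(parts) != 2 {
		return 0, 0, fmt.Errorf("did not find expected digit or '.' character")
	}
	var nums [2]int8
	for i, p := range parts {
		if len(p) == 0 || len(p) > 2 {
			return 0, 0, fmt.Errorf("bad version number %q", p)
		}
		n, convErr := strconv.Atoi(p)
		if convErr != nil {
			return 0, 0, convErr
		}
		nums[i] = int8(n)
	}
	return nums[0], nums[1], nil
}

func main() {
	maj, mnr, err := parseVersion("%YAML 1.1")
	fmt.Println(maj, mnr, err) // 1 1 <nil>
}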
+// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +// +func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { + // Consume the directive name. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + var s []byte + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the name is empty. + if len(s) == 0 { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "could not find expected directive name") + return false + } + + // Check for an blank character after the name. + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unexpected non-alphabetical character") + return false + } + *name = s + return true +} + +// Scan the value of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^ +func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. 
+ if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && string(s) != "!" { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + hasTag := len(head) > 0 + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. 
+ // [Go] Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. + if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + hasTag = true + } + + if !hasTag { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. 
+ if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. + + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. 
+ s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the indentation. + if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. +func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' 
&& + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected document indicator") + return false + } + + // Check for EOF. + if is_z(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected end of stream") + return false + } + + // Consume non-blank characters. + leading_blanks := false + for !is_blankz(parser.buffer, parser.buffer_pos) { + if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { + // Is is an escaped single quote. + s = append(s, '\'') + skip(parser) + skip(parser) + + } else if single && parser.buffer[parser.buffer_pos] == '\'' { + // It is a right single quote. + break + } else if !single && parser.buffer[parser.buffer_pos] == '"' { + // It is a right double quote. + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { + // It is an escaped line break. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + skip(parser) + skip_line(parser) + leading_blanks = true + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' { + // It is an escape sequence. + code_length := 0 + + // Check the escape character. + switch parser.buffer[parser.buffer_pos+1] { + case '0': + s = append(s, 0) + case 'a': + s = append(s, '\x07') + case 'b': + s = append(s, '\x08') + case 't', '\t': + s = append(s, '\x09') + case 'n': + s = append(s, '\x0A') + case 'v': + s = append(s, '\x0B') + case 'f': + s = append(s, '\x0C') + case 'r': + s = append(s, '\x0D') + case 'e': + s = append(s, '\x1B') + case ' ': + s = append(s, '\x20') + case '"': + s = append(s, '"') + case '\'': + s = append(s, '\'') + case '\\': + s = append(s, '\\') + case 'N': // NEL (#x85) + s = append(s, '\xC2') + s = append(s, '\x85') + case '_': // #xA0 + s = append(s, '\xC2') + s = append(s, '\xA0') + case 'L': // LS (#x2028) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA8') + case 'P': // PS (#x2029) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA9') + case 'x': + code_length = 2 + case 'u': + code_length = 4 + case 'U': + code_length = 8 + default: + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found unknown escape character") + return false + } + + skip(parser) + skip(parser) + + // Consume an arbitrary escape code. + if code_length > 0 { + var value int + + // Scan the character value. + if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { + return false + } + for k := 0; k < code_length; k++ { + if !is_hex(parser.buffer, parser.buffer_pos+k) { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "did not find expected hexdecimal number") + return false + } + value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) + } + + // Check the value and write the character. 
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found invalid Unicode character escape code") + return false + } + if value <= 0x7F { + s = append(s, byte(value)) + } else if value <= 0x7FF { + s = append(s, byte(0xC0+(value>>6))) + s = append(s, byte(0x80+(value&0x3F))) + } else if value <= 0xFFFF { + s = append(s, byte(0xE0+(value>>12))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } else { + s = append(s, byte(0xF0+(value>>18))) + s = append(s, byte(0x80+((value>>12)&0x3F))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } + + // Advance the pointer. + for k := 0; k < code_length; k++ { + skip(parser) + } + } + } else { + // It is a non-escaped non-blank character. + s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' 
&& + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. + if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? + if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab characters that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violates indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. 
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go new file mode 100644 index 0000000000..4c45e660a8 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go @@ -0,0 +1,113 @@ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + return bl + } + var ai, bi int + var an, bn int64 + if ar[i] == '0' || br[i] == '0' { + for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + if ar[j] != '0' { + an = 1 + bn = 1 + break + } + } + } + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. +func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go new file mode 100644 index 0000000000..a2dde608cb --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go @@ -0,0 +1,26 @@ +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. 
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go new file mode 100644 index 0000000000..30813884c0 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go @@ -0,0 +1,478 @@ +// Package yaml implements YAML support for the Go language. +// +// Source code and other details for the project are available at GitHub: +// +// https://github.com/go-yaml/yaml +// +package yaml + +import ( + "errors" + "fmt" + "io" + "reflect" + "strings" + "sync" +) + +// MapSlice encodes and decodes as a YAML map. +// The order of keys is preserved when encoding and decoding. +type MapSlice []MapItem + +// MapItem is an item in a MapSlice. +type MapItem struct { + Key, Value interface{} +} + +// The Unmarshaler interface may be implemented by types to customize their +// behavior when being unmarshaled from a YAML document. The UnmarshalYAML +// method receives a function that may be called to unmarshal the original +// YAML value into a field or variable. It is safe to call the unmarshal +// function parameter more than once if necessary. +type Unmarshaler interface { + UnmarshalYAML(unmarshal func(interface{}) error) error +} + +// The Marshaler interface may be implemented by types to customize their +// behavior when being marshaled into a YAML document. The returned value +// is marshaled in place of the original value implementing Marshaler. +// +// If an error is returned by MarshalYAML, the marshaling procedure stops +// and returns with the provided error. +type Marshaler interface { + MarshalYAML() (interface{}, error) +} + +// Unmarshal decodes the first document found within the in byte slice +// and assigns decoded values into the out value. +// +// Maps and pointers (to a struct, string, int, etc) are accepted as out +// values. If an internal pointer within a struct is not initialized, +// the yaml package will initialize it if necessary for unmarshalling +// the provided data. The out parameter must not be nil. +// +// The type of the decoded values should be compatible with the respective +// values in out. If one or more values cannot be decoded due to a type +// mismatches, decoding continues partially until the end of the YAML +// content, and a *yaml.TypeError is returned with details for all +// missed values. +// +// Struct fields are only unmarshalled if they are exported (have an +// upper case first letter), and are unmarshalled using the field name +// lowercased as the default key. Custom keys may be defined via the +// "yaml" name in the field tag: the content preceding the first comma +// is used as the key, and the following comma-separated options are +// used to tweak the marshalling process (see Marshal). +// Conflicting names result in a runtime error. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// var t T +// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) +// +// See the documentation of Marshal for the format of tags and a list of +// supported tag options. 
+// +func Unmarshal(in []byte, out interface{}) (err error) { + return unmarshal(in, out, false) +} + +// UnmarshalStrict is like Unmarshal except that any fields that are found +// in the data that do not have corresponding struct members, or mapping +// keys that are duplicates, will result in +// an error. +func UnmarshalStrict(in []byte, out interface{}) (err error) { + return unmarshal(in, out, true) +} + +// A Decoder reads and decodes YAML values from an input stream. +type Decoder struct { + strict bool + parser *parser +} + +// NewDecoder returns a new decoder that reads from r. +// +// The decoder introduces its own buffering and may read +// data from r beyond the YAML values requested. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{ + parser: newParserFromReader(r), + } +} + +// SetStrict sets whether strict decoding behaviour is enabled when +// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict. +func (dec *Decoder) SetStrict(strict bool) { + dec.strict = strict +} + +// Decode reads the next YAML-encoded value from its input +// and stores it in the value pointed to by v. +// +// See the documentation for Unmarshal for details about the +// conversion of YAML into a Go value. +func (dec *Decoder) Decode(v interface{}) (err error) { + d := newDecoder(dec.strict) + defer handleErr(&err) + node := dec.parser.parse() + if node == nil { + return io.EOF + } + out := reflect.ValueOf(v) + if out.Kind() == reflect.Ptr && !out.IsNil() { + out = out.Elem() + } + d.unmarshal(node, out) + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +func unmarshal(in []byte, out interface{}, strict bool) (err error) { + defer handleErr(&err) + d := newDecoder(strict) + p := newParser(in) + defer p.destroy() + node := p.parse() + if node != nil { + v := reflect.ValueOf(out) + if v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + d.unmarshal(node, v) + } + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +// Marshal serializes the value provided into a YAML document. The structure +// of the generated document will reflect the structure of the value itself. +// Maps and pointers (to struct, string, int, etc) are accepted as the in value. +// +// Struct fields are only marshalled if they are exported (have an upper case +// first letter), and are marshalled using the field name lowercased as the +// default key. Custom keys may be defined via the "yaml" name in the field +// tag: the content preceding the first comma is used as the key, and the +// following comma-separated options are used to tweak the marshalling process. +// Conflicting names result in a runtime error. +// +// The field tag format accepted is: +// +// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)` +// +// The following flags are currently supported: +// +// omitempty Only include the field if it's not set to the zero +// value for the type or to empty slices or maps. +// Zero valued structs will be omitted if all their public +// fields are zero, unless they implement an IsZero +// method (see the IsZeroer interface type), in which +// case the field will be excluded if IsZero returns true. +// +// flow Marshal using a flow style (useful for structs, +// sequences and maps). +// +// inline Inline the field, which must be a struct or a map, +// causing all of its fields or keys to be processed as if +// they were part of the outer struct. For maps, keys must +// not conflict with the yaml keys of other struct fields. 
+// +// In addition, if the key is "-", the field is ignored. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" +// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" +// +func Marshal(in interface{}) (out []byte, err error) { + defer handleErr(&err) + e := newEncoder() + defer e.destroy() + e.marshalDoc("", reflect.ValueOf(in)) + e.finish() + out = e.out + return +} + +// An Encoder writes YAML values to an output stream. +type Encoder struct { + encoder *encoder +} + +// NewEncoder returns a new encoder that writes to w. +// The Encoder should be closed after use to flush all data +// to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + encoder: newEncoderWithWriter(w), + } +} + +// Encode writes the YAML encoding of v to the stream. +// If multiple items are encoded to the stream, the +// second and subsequent document will be preceded +// with a "---" document separator, but the first will not. +// +// See the documentation for Marshal for details about the conversion of Go +// values to YAML. +func (e *Encoder) Encode(v interface{}) (err error) { + defer handleErr(&err) + e.encoder.marshalDoc("", reflect.ValueOf(v)) + return nil +} + +// Close closes the encoder by writing any remaining data. +// It does not write a stream terminating string "...". +func (e *Encoder) Close() (err error) { + defer handleErr(&err) + e.encoder.finish() + return nil +} + +func handleErr(err *error) { + if v := recover(); v != nil { + if e, ok := v.(yamlError); ok { + *err = e.err + } else { + panic(v) + } + } +} + +type yamlError struct { + err error +} + +func fail(err error) { + panic(yamlError{err}) +} + +func failf(format string, args ...interface{}) { + panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) +} + +// A TypeError is returned by Unmarshal when one or more fields in +// the YAML document cannot be properly decoded into the requested +// types. When this error is returned, the value is still +// unmarshaled partially. +type TypeError struct { + Errors []string +} + +func (e *TypeError) Error() string { + return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +// The code in this section was copied from mgo/bson. + +// structInfo holds details for the serialization of fields of +// a given struct. +type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none. + InlineMap int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + // Id holds the unique field identifier, so we can cheaply + // check for field duplicates without maintaining an extra map. + Id int + + // Inline holds the field index if the field is part of an inlined struct. 
+ Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct: + sinfo, err := getStructInfo(field.Type) + if err != nil { + return nil, err + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + default: + //return nil, errors.New("Option ,inline needs a struct value or map field") + return nil, errors.New("Option ,inline needs a struct value field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +// IsZeroer is used to check whether an object is zero to +// determine whether it should be omitted when marshaling +// with the omitempty flag. One notable implementation +// is time.Time. 
+type IsZeroer interface { + IsZero() bool +} + +func isZero(v reflect.Value) bool { + kind := v.Kind() + if z, ok := v.Interface().(IsZeroer); ok { + if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { + return true + } + return z.IsZero() + } + switch kind { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} + +// FutureLineWrap globally disables line wrapping when encoding long strings. +// This is a temporary and thus deprecated method introduced to faciliate +// migration towards v3, which offers more control of line lengths on +// individual encodings, and has a default matching the behavior introduced +// by this function. +// +// The default formatting of v2 was erroneously changed in v2.3.0 and reverted +// in v2.4.0, at which point this function was introduced to help migration. +func FutureLineWrap() { + disableLineWrapping = true +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go new file mode 100644 index 0000000000..f6a9c8e34b --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go @@ -0,0 +1,739 @@ +package yaml + +import ( + "fmt" + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. + yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. 
+} + +// Node Styles + +type yaml_style_t int8 + +type yaml_scalar_style_t yaml_style_t + +// Scalar styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota + + yaml_PLAIN_SCALAR_STYLE // The plain scalar style. + yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. + yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. + yaml_LITERAL_SCALAR_STYLE // The literal scalar style. + yaml_FOLDED_SCALAR_STYLE // The folded scalar style. +) + +type yaml_sequence_style_t yaml_style_t + +// Sequence styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota + + yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. + yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. +) + +type yaml_mapping_style_t yaml_style_t + +// Mapping styles. +const ( + // Let the emitter choose the style. + yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota + + yaml_BLOCK_MAPPING_STYLE // The block mapping style. + yaml_FLOW_MAPPING_STYLE // The flow mapping style. +) + +// Tokens + +type yaml_token_type_t int + +// Token types. +const ( + // An empty token. + yaml_NO_TOKEN yaml_token_type_t = iota + + yaml_STREAM_START_TOKEN // A STREAM-START token. + yaml_STREAM_END_TOKEN // A STREAM-END token. + + yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. + yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. + yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. + yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. + + yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. + yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. + yaml_BLOCK_END_TOKEN // A BLOCK-END token. + + yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. + yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. + yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. + yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. + + yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. + yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. + yaml_KEY_TOKEN // A KEY token. + yaml_VALUE_TOKEN // A VALUE token. + + yaml_ALIAS_TOKEN // An ALIAS token. + yaml_ANCHOR_TOKEN // An ANCHOR token. + yaml_TAG_TOKEN // A TAG token. + yaml_SCALAR_TOKEN // A SCALAR token. 
+) + +func (tt yaml_token_type_t) String() string { + switch tt { + case yaml_NO_TOKEN: + return "yaml_NO_TOKEN" + case yaml_STREAM_START_TOKEN: + return "yaml_STREAM_START_TOKEN" + case yaml_STREAM_END_TOKEN: + return "yaml_STREAM_END_TOKEN" + case yaml_VERSION_DIRECTIVE_TOKEN: + return "yaml_VERSION_DIRECTIVE_TOKEN" + case yaml_TAG_DIRECTIVE_TOKEN: + return "yaml_TAG_DIRECTIVE_TOKEN" + case yaml_DOCUMENT_START_TOKEN: + return "yaml_DOCUMENT_START_TOKEN" + case yaml_DOCUMENT_END_TOKEN: + return "yaml_DOCUMENT_END_TOKEN" + case yaml_BLOCK_SEQUENCE_START_TOKEN: + return "yaml_BLOCK_SEQUENCE_START_TOKEN" + case yaml_BLOCK_MAPPING_START_TOKEN: + return "yaml_BLOCK_MAPPING_START_TOKEN" + case yaml_BLOCK_END_TOKEN: + return "yaml_BLOCK_END_TOKEN" + case yaml_FLOW_SEQUENCE_START_TOKEN: + return "yaml_FLOW_SEQUENCE_START_TOKEN" + case yaml_FLOW_SEQUENCE_END_TOKEN: + return "yaml_FLOW_SEQUENCE_END_TOKEN" + case yaml_FLOW_MAPPING_START_TOKEN: + return "yaml_FLOW_MAPPING_START_TOKEN" + case yaml_FLOW_MAPPING_END_TOKEN: + return "yaml_FLOW_MAPPING_END_TOKEN" + case yaml_BLOCK_ENTRY_TOKEN: + return "yaml_BLOCK_ENTRY_TOKEN" + case yaml_FLOW_ENTRY_TOKEN: + return "yaml_FLOW_ENTRY_TOKEN" + case yaml_KEY_TOKEN: + return "yaml_KEY_TOKEN" + case yaml_VALUE_TOKEN: + return "yaml_VALUE_TOKEN" + case yaml_ALIAS_TOKEN: + return "yaml_ALIAS_TOKEN" + case yaml_ANCHOR_TOKEN: + return "yaml_ANCHOR_TOKEN" + case yaml_TAG_TOKEN: + return "yaml_TAG_TOKEN" + case yaml_SCALAR_TOKEN: + return "yaml_SCALAR_TOKEN" + } + return "<unknown token>" +} + +// The token structure. +type yaml_token_t struct { + // The token type. + typ yaml_token_type_t + + // The start/end of the token. + start_mark, end_mark yaml_mark_t + + // The stream encoding (for yaml_STREAM_START_TOKEN). + encoding yaml_encoding_t + + // The alias/anchor/scalar value or tag/tag directive handle + // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). + value []byte + + // The tag suffix (for yaml_TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar style (for yaml_SCALAR_TOKEN). + style yaml_scalar_style_t + + // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type yaml_event_type_t int8 + +// Event types. +const ( + // An empty event. + yaml_NO_EVENT yaml_event_type_t = iota + + yaml_STREAM_START_EVENT // A STREAM-START event. + yaml_STREAM_END_EVENT // A STREAM-END event. + yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. 
+) + +var eventStrings = []string{ + yaml_NO_EVENT: "none", + yaml_STREAM_START_EVENT: "stream start", + yaml_STREAM_END_EVENT: "stream end", + yaml_DOCUMENT_START_EVENT: "document start", + yaml_DOCUMENT_END_EVENT: "document end", + yaml_ALIAS_EVENT: "alias", + yaml_SCALAR_EVENT: "scalar", + yaml_SEQUENCE_START_EVENT: "sequence start", + yaml_SEQUENCE_END_EVENT: "sequence end", + yaml_MAPPING_START_EVENT: "mapping start", + yaml_MAPPING_END_EVENT: "mapping end", +} + +func (e yaml_event_type_t) String() string { + if e < 0 || int(e) >= len(eventStrings) { + return fmt.Sprintf("unknown event %d", e) + } + return eventStrings[e] +} + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). + anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. + yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. 
+type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. +type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. + version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// yaml_parser_set_input(). +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. +// +// On success, the handler should return 1. If the handler failed, +// the returned value should be 0. On EOF, the handler should set the +// size_read to 0 and return 1. +type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) + +// This structure holds information about a potential simple key. +type yaml_simple_key_t struct { + possible bool // Is a simple key possible? + required bool // Is a simple key required? + token_number int // The number of the token. + mark yaml_mark_t // The position mark. +} + +// The states of the parser. +type yaml_parser_state_t int + +const ( + yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota + + yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. + yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. + yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. + yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. + yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. 
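// For orientation: a read handler of the yaml_read_handler_t shape defined
// above is essentially an adapter around an io.Reader. A minimal sketch only;
// the concrete handlers used by the package are defined elsewhere in the
// library, and the function name here is illustrative:
//
//	func readerHandler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
//		return parser.input_reader.Read(buffer)
//	}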
+ yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. + yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. + yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. + yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. + yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. + yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the and of an ordered mapping entry. + yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping. + yaml_PARSE_END_STATE // Expect nothing. +) + +func (ps yaml_parser_state_t) String() string { + switch ps { + case yaml_PARSE_STREAM_START_STATE: + return "yaml_PARSE_STREAM_START_STATE" + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_START_STATE: + return "yaml_PARSE_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return "yaml_PARSE_DOCUMENT_CONTENT_STATE" + case yaml_PARSE_DOCUMENT_END_STATE: + return "yaml_PARSE_DOCUMENT_END_STATE" + case yaml_PARSE_BLOCK_NODE_STATE: + return "yaml_PARSE_BLOCK_NODE_STATE" + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" + case yaml_PARSE_FLOW_NODE_STATE: + return "yaml_PARSE_FLOW_NODE_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return 
"yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "<unknown parser state>" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occurred. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_reader io.Reader // File input data. + input []byte // String input data. + input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The input encoding. + + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// yaml_emitter_set_output(). +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +// +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. 
+ yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. + yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. + yaml_EMIT_END_STATE // Expect nothing. +) + +// The emitter structure. +// +// All members are internal. Manage the structure using the @c yaml_emitter_ +// family of functions. +type yaml_emitter_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + problem string // Error description. + + // Writer stuff + + write_handler yaml_write_handler_t // Write handler. + + output_buffer *[]byte // String output data. + output_writer io.Writer // File output data. + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The stream encoding. + + // Emitter stuff + + canonical bool // If the output is in the canonical style? + best_indent int // The number of indentation spaces. + best_width int // The preferred width of the output lines. + unicode bool // Allow unescaped non-ASCII characters? + line_break yaml_break_t // The preferred line break. + + state yaml_emitter_state_t // The current emitter state. + states []yaml_emitter_state_t // The stack of states. + + events []yaml_event_t // The event queue. + events_head int // The head of the event queue. + + indents []int // The stack of indentation levels. + + tag_directives []yaml_tag_directive_t // The list of tag directives. + + indent int // The current indentation level. + + flow_level int // The current flow level. + + root_context bool // Is it the document root context? + sequence_context bool // Is it a sequence context? + mapping_context bool // Is it a mapping context? + simple_key_context bool // Is it a simple mapping key context? + + line int // The current line. + column int // The current column. + whitespace bool // If the last character was a whitespace? + indention bool // If the last character was an indentation character (' ', '-', '?', ':')? + open_ended bool // If an explicit document end is required? + + // Anchor analysis. + anchor_data struct { + anchor []byte // The anchor value. + alias bool // Is it an alias? + } + + // Tag analysis. + tag_data struct { + handle []byte // The tag handle. + suffix []byte // The tag suffix. + } + + // Scalar analysis. + scalar_data struct { + value []byte // The scalar value. 
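// For orientation: the analysis flags that follow record which output styles
// are legal for a given scalar. For example, a value such as "foo: bar"
// cannot be written in the plain style (the ": " would read as a mapping
// indicator), so the emitter falls back to a quoted style; a value containing
// line breaks sets multiline, which further constrains the styles it may
// choose. Illustrative only.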
+ multiline bool // Does the scalar contain line breaks? + flow_plain_allowed bool // Can the scalar be expessed in the flow plain style? + block_plain_allowed bool // Can the scalar be expressed in the block plain style? + single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? + block_allowed bool // Can the scalar be expressed in the literal or folded styles? + style yaml_scalar_style_t // The output style. + } + + // Dumper stuff + + opened bool // If the stream was already opened? + closed bool // If the stream was already closed? + + // The information associated with the document nodes. + anchors *struct { + references int // The number of references. + anchor int // The anchor id. + serialized bool // If the node has been emitted? + } + + last_anchor_id int // The last assigned anchor id. + + document *yaml_document_t // The currently emitted document. +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go new file mode 100644 index 0000000000..8110ce3c37 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go @@ -0,0 +1,173 @@ +package yaml + +const ( + // The size of the input raw buffer. + input_raw_buffer_size = 512 + + // The size of the input buffer. + // It should be possible to decode the whole raw buffer. + input_buffer_size = input_raw_buffer_size * 3 + + // The size of the output buffer. + output_buffer_size = 128 + + // The size of the output raw buffer. + // It should be possible to encode the whole output buffer. + output_raw_buffer_size = (output_buffer_size*2 + 2) + + // The size of other stacks and queues. + initial_stack_size = 16 + initial_queue_size = 16 + initial_string_size = 16 +) + +// Check if the character at the specified position is an alphabetical +// character, a digit, '_', or '-'. +func is_alpha(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' +} + +// Check if the character at the specified position is a digit. +func is_digit(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' +} + +// Get the value of a digit. +func as_digit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func is_hex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. +func as_hex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. +func is_ascii(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func is_printable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. 
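// For orientation: the byte-level checks in this file follow the standard
// UTF-8 lead-byte ranges. As a worked example, 'é' is encoded as 0xC3 0xA9,
// so width(0xC3) reports a 2-byte character; the line separator U+2028 is
// 0xE2 0x80 0xA8, one of the 3-byte sequences recognised by is_break; and the
// UTF-8 byte order mark tested for below is 0xEF 0xBB 0xBF.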
+func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. +func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. + if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} diff --git a/vendor/sigs.k8s.io/yaml/yaml.go b/vendor/sigs.k8s.io/yaml/yaml.go index efbc535d41..fc10246bdb 100644 --- a/vendor/sigs.k8s.io/yaml/yaml.go +++ b/vendor/sigs.k8s.io/yaml/yaml.go @@ -1,3 +1,19 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + package yaml import ( @@ -8,56 +24,59 @@ import ( "reflect" "strconv" - "gopkg.in/yaml.v2" + "sigs.k8s.io/yaml/goyaml.v2" ) -// Marshal marshals the object into JSON then converts JSON to YAML and returns the -// YAML. -func Marshal(o interface{}) ([]byte, error) { - j, err := json.Marshal(o) +// Marshal marshals obj into JSON using stdlib json.Marshal, and then converts JSON to YAML using JSONToYAML (see that method for more reference) +func Marshal(obj interface{}) ([]byte, error) { + jsonBytes, err := json.Marshal(obj) if err != nil { - return nil, fmt.Errorf("error marshaling into JSON: %v", err) + return nil, fmt.Errorf("error marshaling into JSON: %w", err) } - y, err := JSONToYAML(j) - if err != nil { - return nil, fmt.Errorf("error converting JSON to YAML: %v", err) - } - - return y, nil + return JSONToYAML(jsonBytes) } // JSONOpt is a decoding option for decoding from JSON format. type JSONOpt func(*json.Decoder) *json.Decoder -// Unmarshal converts YAML to JSON then uses JSON to unmarshal into an object, -// optionally configuring the behavior of the JSON unmarshal. -func Unmarshal(y []byte, o interface{}, opts ...JSONOpt) error { - return yamlUnmarshal(y, o, false, opts...) +// Unmarshal first converts the given YAML to JSON, and then unmarshals the JSON into obj. Options for the +// standard library json.Decoder can be optionally specified, e.g. to decode untyped numbers into json.Number instead of float64, or to disallow unknown fields (but for that purpose, see also UnmarshalStrict). obj must be a non-nil pointer. +// +// Important notes about the Unmarshal logic: +// +// - Decoding is case-insensitive, unlike the rest of Kubernetes API machinery, as this is using the stdlib json library. This might be confusing to users. +// - This decodes any number (although it is an integer) into a float64 if the type of obj is unknown, e.g. *map[string]interface{}, *interface{}, or *[]interface{}. This means integers above +/- 2^53 will lose precision when round-tripping. Make a JSONOpt that calls d.UseNumber() to avoid this. +// - Duplicate fields, including in-case-sensitive matches, are ignored in an undefined order. Note that the YAML specification forbids duplicate fields, so this logic is more permissive than it needs to. See UnmarshalStrict for an alternative. +// - Unknown fields, i.e. serialized data that do not map to a field in obj, are ignored. Use d.DisallowUnknownFields() or UnmarshalStrict to override. +// - As per the YAML 1.1 specification, which yaml.v2 used underneath implements, literal 'yes' and 'no' strings without quotation marks will be converted to true/false implicitly. +// - YAML non-string keys, e.g. ints, bools and floats, are converted to strings implicitly during the YAML to JSON conversion process. +// - There are no compatibility guarantees for returned error values. +func Unmarshal(yamlBytes []byte, obj interface{}, opts ...JSONOpt) error { + return unmarshal(yamlBytes, obj, yaml.Unmarshal, opts...) } -// UnmarshalStrict strictly converts YAML to JSON then uses JSON to unmarshal -// into an object, optionally configuring the behavior of the JSON unmarshal. -func UnmarshalStrict(y []byte, o interface{}, opts ...JSONOpt) error { - return yamlUnmarshal(y, o, true, append(opts, DisallowUnknownFields)...) +// UnmarshalStrict is similar to Unmarshal (please read its documentation for reference), with the following exceptions: +// +// - Duplicate fields in an object yield an error. This is according to the YAML specification. 
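// As an illustration of the duplicate-field rule above: a document such as
//
//	a: 1
//	a: 2
//
// unmarshals without error through Unmarshal (the duplicates are resolved in
// an undefined order), but causes UnmarshalStrict to return an error.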
+// - If obj, or any of its recursive children, is a struct, presence of fields in the serialized data unknown to the struct will yield an error. +func UnmarshalStrict(yamlBytes []byte, obj interface{}, opts ...JSONOpt) error { + return unmarshal(yamlBytes, obj, yaml.UnmarshalStrict, append(opts, DisallowUnknownFields)...) } -// yamlUnmarshal unmarshals the given YAML byte stream into the given interface, +// unmarshal unmarshals the given YAML byte stream into the given interface, // optionally performing the unmarshalling strictly -func yamlUnmarshal(y []byte, o interface{}, strict bool, opts ...JSONOpt) error { - vo := reflect.ValueOf(o) - unmarshalFn := yaml.Unmarshal - if strict { - unmarshalFn = yaml.UnmarshalStrict - } - j, err := yamlToJSON(y, &vo, unmarshalFn) +func unmarshal(yamlBytes []byte, obj interface{}, unmarshalFn func([]byte, interface{}) error, opts ...JSONOpt) error { + jsonTarget := reflect.ValueOf(obj) + + jsonBytes, err := yamlToJSONTarget(yamlBytes, &jsonTarget, unmarshalFn) if err != nil { - return fmt.Errorf("error converting YAML to JSON: %v", err) + return fmt.Errorf("error converting YAML to JSON: %w", err) } - err = jsonUnmarshal(bytes.NewReader(j), o, opts...) + err = jsonUnmarshal(bytes.NewReader(jsonBytes), obj, opts...) if err != nil { - return fmt.Errorf("error unmarshaling JSON: %v", err) + return fmt.Errorf("error unmarshaling JSON: %w", err) } return nil @@ -67,21 +86,26 @@ func yamlUnmarshal(y []byte, o interface{}, strict bool, opts ...JSONOpt) error // object, optionally applying decoder options prior to decoding. We are not // using json.Unmarshal directly as we want the chance to pass in non-default // options. -func jsonUnmarshal(r io.Reader, o interface{}, opts ...JSONOpt) error { - d := json.NewDecoder(r) +func jsonUnmarshal(reader io.Reader, obj interface{}, opts ...JSONOpt) error { + d := json.NewDecoder(reader) for _, opt := range opts { d = opt(d) } - if err := d.Decode(&o); err != nil { + if err := d.Decode(&obj); err != nil { return fmt.Errorf("while decoding JSON: %v", err) } return nil } -// JSONToYAML Converts JSON to YAML. +// JSONToYAML converts JSON to YAML. Notable implementation details: +// +// - Duplicate fields, are case-sensitively ignored in an undefined order. +// - The sequence indentation style is compact, which means that the "- " marker for a YAML sequence will be on the same indentation level as the sequence field name. +// - Unlike Unmarshal, all integers, up to 64 bits, are preserved during this round-trip. func JSONToYAML(j []byte) ([]byte, error) { // Convert the JSON to an object. var jsonObj interface{} + // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the // Go JSON library doesn't try to pick the right number type (int, float, // etc.) when unmarshalling to interface{}, it just picks float64 @@ -93,35 +117,46 @@ func JSONToYAML(j []byte) ([]byte, error) { } // Marshal this object into YAML. - return yaml.Marshal(jsonObj) + yamlBytes, err := yaml.Marshal(jsonObj) + if err != nil { + return nil, err + } + + return yamlBytes, nil } // YAMLToJSON converts YAML to JSON. Since JSON is a subset of YAML, // passing JSON through this method should be a no-op. // -// Things YAML can do that are not supported by JSON: -// * In YAML you can have binary and null keys in your maps. These are invalid -// in JSON. (int and float keys are converted to strings.) -// * Binary data in YAML with the !!binary tag is not supported. 
If you want to -// use binary data with this library, encode the data as base64 as usual but do -// not use the !!binary tag in your YAML. This will ensure the original base64 -// encoded data makes it all the way through to the JSON. +// Some things YAML can do that are not supported by JSON: +// - In YAML you can have binary and null keys in your maps. These are invalid +// in JSON, and therefore int, bool and float keys are converted to strings implicitly. +// - Binary data in YAML with the !!binary tag is not supported. If you want to +// use binary data with this library, encode the data as base64 as usual but do +// not use the !!binary tag in your YAML. This will ensure the original base64 +// encoded data makes it all the way through to the JSON. +// - And more... read the YAML specification for more details. +// +// Notable about the implementation: // -// For strict decoding of YAML, use YAMLToJSONStrict. +// - Duplicate fields are case-sensitively ignored in an undefined order. Note that the YAML specification forbids duplicate fields, so this logic is more permissive than it needs to. See YAMLToJSONStrict for an alternative. +// - As per the YAML 1.1 specification, which yaml.v2 used underneath implements, literal 'yes' and 'no' strings without quotation marks will be converted to true/false implicitly. +// - Unlike Unmarshal, all integers, up to 64 bits, are preserved during this round-trip. +// - There are no compatibility guarantees for returned error values. func YAMLToJSON(y []byte) ([]byte, error) { - return yamlToJSON(y, nil, yaml.Unmarshal) + return yamlToJSONTarget(y, nil, yaml.Unmarshal) } // YAMLToJSONStrict is like YAMLToJSON but enables strict YAML decoding, // returning an error on any duplicate field names. func YAMLToJSONStrict(y []byte) ([]byte, error) { - return yamlToJSON(y, nil, yaml.UnmarshalStrict) + return yamlToJSONTarget(y, nil, yaml.UnmarshalStrict) } -func yamlToJSON(y []byte, jsonTarget *reflect.Value, yamlUnmarshal func([]byte, interface{}) error) ([]byte, error) { +func yamlToJSONTarget(yamlBytes []byte, jsonTarget *reflect.Value, unmarshalFn func([]byte, interface{}) error) ([]byte, error) { // Convert the YAML to an object. var yamlObj interface{} - err := yamlUnmarshal(y, &yamlObj) + err := unmarshalFn(yamlBytes, &yamlObj) if err != nil { return nil, err } @@ -136,7 +171,11 @@ func yamlToJSON(y []byte, jsonTarget *reflect.Value, yamlUnmarshal func([]byte, } // Convert this object to JSON and return the data. - return json.Marshal(jsonObj) + jsonBytes, err := json.Marshal(jsonObj) + if err != nil { + return nil, err + } + return jsonBytes, nil } func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) { @@ -147,13 +186,13 @@ func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (in // decoding into the value, we're just checking if the ultimate target is a // string. if jsonTarget != nil { - ju, tu, pv := indirect(*jsonTarget, false) + jsonUnmarshaler, textUnmarshaler, pointerValue := indirect(*jsonTarget, false) // We have a JSON or Text Umarshaler at this level, so we can't be trying // to decode into a string. 
- if ju != nil || tu != nil { + if jsonUnmarshaler != nil || textUnmarshaler != nil { jsonTarget = nil } else { - jsonTarget = &pv + jsonTarget = &pointerValue } } @@ -205,7 +244,7 @@ func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (in keyString = "false" } default: - return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v", + return nil, fmt.Errorf("unsupported map key of type: %s, key: %+#v, value: %+#v", reflect.TypeOf(k), k, v) } diff --git a/vendor/sigs.k8s.io/yaml/yaml_go110.go b/vendor/sigs.k8s.io/yaml/yaml_go110.go index ab3e06a222..94abc1719d 100644 --- a/vendor/sigs.k8s.io/yaml/yaml_go110.go +++ b/vendor/sigs.k8s.io/yaml/yaml_go110.go @@ -1,7 +1,24 @@ // This file contains changes that are only compatible with go 1.10 and onwards. +//go:build go1.10 // +build go1.10 +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package yaml import "encoding/json"
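// For orientation: the exported API of this vendored sigs.k8s.io/yaml package
// is used the same way as the upstream module. A minimal, self-contained
// sketch of typical usage follows; the type, field names, and values are
// illustrative only.
//
//	package main
//
//	import (
//		"encoding/json"
//		"fmt"
//
//		"sigs.k8s.io/yaml"
//	)
//
//	type Config struct {
//		Name     string `json:"name"`
//		Replicas int    `json:"replicas"`
//	}
//
//	func main() {
//		in := []byte("name: hubble\nreplicas: 2\n")
//
//		// YAML -> struct (via an intermediate JSON representation).
//		var cfg Config
//		if err := yaml.Unmarshal(in, &cfg); err != nil {
//			panic(err)
//		}
//		fmt.Printf("%+v\n", cfg)
//
//		// Struct -> YAML.
//		out, err := yaml.Marshal(cfg)
//		if err != nil {
//			panic(err)
//		}
//		fmt.Print(string(out))
//
//		// YAML -> JSON without going through a Go type.
//		j, err := yaml.YAMLToJSON(in)
//		if err != nil {
//			panic(err)
//		}
//		fmt.Println(string(j)) // {"name":"hubble","replicas":2}
//
//		// Decode untyped numbers as json.Number instead of float64.
//		var m map[string]interface{}
//		useNumber := func(d *json.Decoder) *json.Decoder { d.UseNumber(); return d }
//		if err := yaml.Unmarshal(in, &m, useNumber); err != nil {
//			panic(err)
//		}
//		fmt.Printf("%T\n", m["replicas"]) // json.Number
//	}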